feat: add pink-purple theme, fix image paste race condition, allow @/commands anywhere in input
- Add Pink Purple theme (hot pink/purple/magenta on dark plum background) - Fix race condition where clearPastedImages() in input-area ran before the async message handler could read the images, silently dropping them - Allow @ file picker and / command menu to trigger at any cursor position, not just when the input is empty - Update CHANGELOG and README with new changes
This commit is contained in:
@@ -195,6 +195,12 @@ const processStreamChunk = (
|
||||
}
|
||||
},
|
||||
|
||||
usage: () => {
|
||||
if (chunk.usage) {
|
||||
callbacks.onUsage?.(chunk.usage);
|
||||
}
|
||||
},
|
||||
|
||||
done: () => {
|
||||
// Finalize all pending tool calls
|
||||
for (const partial of accumulator.toolCalls.values()) {
|
||||
@@ -657,6 +663,7 @@ export const runAgentLoopStream = async (
|
||||
finalResponse: "Execution aborted by user",
|
||||
iterations,
|
||||
toolCalls: allToolCalls,
|
||||
stopReason: "aborted",
|
||||
};
|
||||
}
|
||||
|
||||
@@ -726,13 +733,14 @@ export const runAgentLoopStream = async (
|
||||
if (allFailed) {
|
||||
consecutiveErrors++;
|
||||
if (consecutiveErrors >= MAX_CONSECUTIVE_ERRORS) {
|
||||
const errorMsg = `Stopping: ${consecutiveErrors} consecutive tool errors. Check model compatibility with tool calling.`;
|
||||
const errorMsg = `Stopping after ${consecutiveErrors} consecutive tool errors. Check model compatibility with tool calling.`;
|
||||
state.options.onError?.(errorMsg);
|
||||
return {
|
||||
success: false,
|
||||
finalResponse: errorMsg,
|
||||
iterations,
|
||||
toolCalls: allToolCalls,
|
||||
stopReason: "consecutive_errors",
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -751,12 +759,18 @@ export const runAgentLoopStream = async (
|
||||
finalResponse: `Error: ${errorMessage}`,
|
||||
iterations,
|
||||
toolCalls: allToolCalls,
|
||||
stopReason: "error",
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
if (iterations >= maxIterations) {
|
||||
state.options.onWarning?.(`Reached max iterations (${maxIterations})`);
|
||||
const hitMaxIterations = iterations >= maxIterations;
|
||||
|
||||
if (hitMaxIterations) {
|
||||
const warnMsg = `Agent reached max iterations (${maxIterations}). ` +
|
||||
`Completed ${allToolCalls.length} tool call(s) across ${iterations} iteration(s). ` +
|
||||
`The task may be incomplete — you can send another message to continue.`;
|
||||
state.options.onWarning?.(warnMsg);
|
||||
}
|
||||
|
||||
return {
|
||||
@@ -764,6 +778,7 @@ export const runAgentLoopStream = async (
|
||||
finalResponse,
|
||||
iterations,
|
||||
toolCalls: allToolCalls,
|
||||
stopReason: hitMaxIterations ? "max_iterations" : "completed",
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
|
||||
import { saveSession as saveSessionSession } from "@services/core/session";
|
||||
import { appStore } from "@tui-solid/context/app";
|
||||
import { getMessageText } from "@/types/providers";
|
||||
import { CHAT_MESSAGES, type CommandName } from "@constants/chat-service";
|
||||
import { handleLogin, handleLogout, showWhoami } from "@services/chat-tui/auth";
|
||||
import {
|
||||
@@ -44,7 +45,7 @@ const saveSession: CommandHandler = async (_, callbacks) => {
|
||||
|
||||
const showContext: CommandHandler = (state, callbacks) => {
|
||||
const tokenEstimate = state.messages.reduce(
|
||||
(sum, msg) => sum + Math.ceil(msg.content.length / 4),
|
||||
(sum, msg) => sum + Math.ceil(getMessageText(msg.content).length / 4),
|
||||
0,
|
||||
);
|
||||
callbacks.onLog(
|
||||
|
||||
@@ -19,6 +19,8 @@ import {
|
||||
buildCompletePrompt,
|
||||
} from "@services/prompt-builder";
|
||||
import { initSuggestionService } from "@services/command-suggestion-service";
|
||||
import { initializeRegistry as initializeSkillRegistry } from "@services/skill-registry";
|
||||
import { initializeKeybinds } from "@services/keybind-resolver";
|
||||
import * as brainService from "@services/brain";
|
||||
import { BRAIN_DISABLED } from "@constants/brain";
|
||||
import { addContextFile } from "@services/chat-tui/files";
|
||||
@@ -27,6 +29,8 @@ import type { ChatSession } from "@/types/common";
|
||||
import type { ChatTUIOptions } from "@interfaces/ChatTUIOptions";
|
||||
import type { ChatServiceState } from "@/types/chat-service";
|
||||
import type { InteractionMode } from "@/types/tui";
|
||||
import { getModelContextSize } from "@constants/copilot";
|
||||
import { getDefaultModel } from "@providers/core/chat";
|
||||
|
||||
const createInitialState = async (
|
||||
options: ChatTUIOptions,
|
||||
@@ -223,6 +227,10 @@ export const initializeChatService = async (
|
||||
|
||||
const session = await initializeSession(state, options);
|
||||
|
||||
// Set context max tokens based on the resolved provider + model
|
||||
const effectiveModel = state.model ?? getDefaultModel(state.provider);
|
||||
appStore.setContextMaxTokens(getModelContextSize(effectiveModel).input);
|
||||
|
||||
if (state.messages.length === 0) {
|
||||
state.messages.push({ role: "system", content: state.systemPrompt });
|
||||
}
|
||||
@@ -231,6 +239,8 @@ export const initializeChatService = async (
|
||||
await Promise.all([
|
||||
addInitialContextFiles(state, options.files),
|
||||
initializePermissions(),
|
||||
initializeSkillRegistry(),
|
||||
initializeKeybinds(),
|
||||
]);
|
||||
|
||||
initSuggestionService(process.cwd());
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
LEARNING_CONFIDENCE_THRESHOLD,
|
||||
MAX_LEARNINGS_DISPLAY,
|
||||
} from "@constants/chat-service";
|
||||
import { getMessageText } from "@/types/providers";
|
||||
import {
|
||||
detectLearnings,
|
||||
saveLearning,
|
||||
@@ -35,8 +36,8 @@ export const handleRememberCommand = async (
|
||||
}
|
||||
|
||||
const candidates = detectLearnings(
|
||||
lastUserMsg.content,
|
||||
lastAssistantMsg.content,
|
||||
getMessageText(lastUserMsg.content),
|
||||
getMessageText(lastAssistantMsg.content),
|
||||
);
|
||||
|
||||
if (candidates.length === 0) {
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
* Chat TUI message handling
|
||||
*/
|
||||
|
||||
import { v4 as uuidv4 } from "uuid";
|
||||
import { addMessage, saveSession } from "@services/core/session";
|
||||
import {
|
||||
createStreamingAgent,
|
||||
@@ -56,6 +57,7 @@ import { PROVIDER_IDS } from "@constants/provider-quality";
|
||||
import { appStore } from "@tui-solid/context/app";
|
||||
import type { StreamCallbacks } from "@/types/streaming";
|
||||
import type { TaskType } from "@/types/provider-quality";
|
||||
import type { ContentPart, MessageContent } from "@/types/providers";
|
||||
import type {
|
||||
ChatServiceState,
|
||||
ChatServiceCallbacks,
|
||||
@@ -69,6 +71,12 @@ import {
|
||||
executeDetectedCommand,
|
||||
} from "@services/command-detection";
|
||||
import { detectSkillCommand, executeSkill } from "@services/skill-service";
|
||||
import {
|
||||
buildSkillInjectionForPrompt,
|
||||
getDetectedSkillsSummary,
|
||||
} from "@services/skill-registry";
|
||||
import { stripMarkdown } from "@/utils/markdown/strip";
|
||||
import { createThinkingParser } from "@services/reasoning/thinking-parser";
|
||||
import {
|
||||
getActivePlans,
|
||||
isApprovalMessage,
|
||||
@@ -105,7 +113,9 @@ export const abortCurrentOperation = async (
|
||||
appStore.setMode("idle");
|
||||
addDebugLog(
|
||||
"state",
|
||||
rollback ? "Operation aborted with rollback" : "Operation aborted by user",
|
||||
rollback
|
||||
? "Operation aborted with rollback"
|
||||
: "Operation aborted by user",
|
||||
);
|
||||
return true;
|
||||
}
|
||||
@@ -213,6 +223,48 @@ export const getExecutionState = (): {
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* Extract file path(s) from a tool call's arguments.
|
||||
*
|
||||
* Different tools store the path in different places:
|
||||
* - write / edit / delete : `args.filePath` or `args.path`
|
||||
* - multi_edit : `args.edits[].file_path`
|
||||
* - apply_patch : `args.targetFile` (or parsed from patch header)
|
||||
* - bash : no reliable path, skip
|
||||
*/
|
||||
const extractToolPaths = (
|
||||
toolName: string,
|
||||
args?: Record<string, unknown>,
|
||||
): { primary?: string; all: string[] } => {
|
||||
if (!args) return { all: [] };
|
||||
|
||||
// Standard single-file tools
|
||||
const singlePath =
|
||||
(args.filePath as string) ??
|
||||
(args.file_path as string) ??
|
||||
(args.path as string);
|
||||
|
||||
if (singlePath && toolName !== "multi_edit") {
|
||||
return { primary: String(singlePath), all: [String(singlePath)] };
|
||||
}
|
||||
|
||||
// multi_edit: array of edits with file_path
|
||||
if (toolName === "multi_edit" && Array.isArray(args.edits)) {
|
||||
const paths = (args.edits as Array<{ file_path?: string }>)
|
||||
.map((e) => e.file_path)
|
||||
.filter((p): p is string => Boolean(p));
|
||||
const unique = [...new Set(paths)];
|
||||
return { primary: unique[0], all: unique };
|
||||
}
|
||||
|
||||
// apply_patch: targetFile override or embedded in patch content
|
||||
if (toolName === "apply_patch" && args.targetFile) {
|
||||
return { primary: String(args.targetFile), all: [String(args.targetFile)] };
|
||||
}
|
||||
|
||||
return { all: [] };
|
||||
};
|
||||
|
||||
const createToolCallHandler =
|
||||
(
|
||||
callbacks: ChatServiceCallbacks,
|
||||
@@ -220,11 +272,13 @@ const createToolCallHandler =
|
||||
) =>
|
||||
(call: { id: string; name: string; arguments?: Record<string, unknown> }) => {
|
||||
const args = call.arguments;
|
||||
if (
|
||||
(FILE_MODIFYING_TOOLS as readonly string[]).includes(call.name) &&
|
||||
args?.path
|
||||
) {
|
||||
toolCallRef.current = { name: call.name, path: String(args.path) };
|
||||
const isModifying = (FILE_MODIFYING_TOOLS as readonly string[]).includes(
|
||||
call.name,
|
||||
);
|
||||
|
||||
if (isModifying) {
|
||||
const { primary, all } = extractToolPaths(call.name, args);
|
||||
toolCallRef.current = { name: call.name, path: primary, paths: all };
|
||||
} else {
|
||||
toolCallRef.current = { name: call.name };
|
||||
}
|
||||
@@ -238,6 +292,28 @@ const createToolCallHandler =
|
||||
});
|
||||
};
|
||||
|
||||
/**
|
||||
* Estimate additions/deletions from tool output text
|
||||
*/
|
||||
const estimateChanges = (
|
||||
output: string,
|
||||
): { additions: number; deletions: number } => {
|
||||
let additions = 0;
|
||||
let deletions = 0;
|
||||
|
||||
for (const line of output.split("\n")) {
|
||||
if (line.startsWith("+") && !line.startsWith("+++")) additions++;
|
||||
else if (line.startsWith("-") && !line.startsWith("---")) deletions++;
|
||||
}
|
||||
|
||||
// Fallback estimate when no diff markers are found
|
||||
if (additions === 0 && deletions === 0 && output.length > 0) {
|
||||
additions = output.split("\n").length;
|
||||
}
|
||||
|
||||
return { additions, deletions };
|
||||
};
|
||||
|
||||
const createToolResultHandler =
|
||||
(
|
||||
callbacks: ChatServiceCallbacks,
|
||||
@@ -252,8 +328,33 @@ const createToolResultHandler =
|
||||
error?: string;
|
||||
},
|
||||
) => {
|
||||
if (result.success && toolCallRef.current?.path) {
|
||||
analyzeFileChange(toolCallRef.current.path);
|
||||
const ref = toolCallRef.current;
|
||||
|
||||
if (result.success && ref) {
|
||||
const output = result.output ?? "";
|
||||
const paths = ref.paths?.length ? ref.paths : ref.path ? [ref.path] : [];
|
||||
|
||||
if (paths.length > 0) {
|
||||
const { additions, deletions } = estimateChanges(output);
|
||||
|
||||
// Distribute changes across paths (or assign all to the single path)
|
||||
const perFile = paths.length > 1
|
||||
? {
|
||||
additions: Math.max(1, Math.ceil(additions / paths.length)),
|
||||
deletions: Math.ceil(deletions / paths.length),
|
||||
}
|
||||
: { additions, deletions };
|
||||
|
||||
for (const filePath of paths) {
|
||||
analyzeFileChange(filePath);
|
||||
appStore.addModifiedFile({
|
||||
filePath,
|
||||
additions: perFile.additions,
|
||||
deletions: perFile.deletions,
|
||||
lastModified: Date.now(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
callbacks.onToolResult(
|
||||
@@ -270,6 +371,34 @@ const createToolResultHandler =
|
||||
*/
|
||||
const createStreamCallbacks = (): StreamCallbacksWithState => {
|
||||
let chunkCount = 0;
|
||||
let currentSegmentHasContent = false;
|
||||
let receivedUsage = false;
|
||||
const thinkingParser = createThinkingParser();
|
||||
|
||||
const emitThinking = (thinking: string | null): void => {
|
||||
if (!thinking) return;
|
||||
appStore.addLog({ type: "thinking", content: thinking });
|
||||
};
|
||||
|
||||
/**
|
||||
* Finalize the current streaming segment (if it has content) so that
|
||||
* tool logs appear below the pre-tool text and a new streaming segment
|
||||
* can be started afterward for post-tool text (e.g. summary).
|
||||
*/
|
||||
const finalizeCurrentSegment = (): void => {
|
||||
if (!currentSegmentHasContent) return;
|
||||
|
||||
// Flush thinking parser before finalizing the segment
|
||||
const flushed = thinkingParser.flush();
|
||||
if (flushed.visible) {
|
||||
appStore.appendStreamContent(flushed.visible);
|
||||
}
|
||||
emitThinking(flushed.thinking);
|
||||
|
||||
appStore.completeStreaming();
|
||||
currentSegmentHasContent = false;
|
||||
addDebugLog("stream", "Finalized streaming segment before tool call");
|
||||
};
|
||||
|
||||
const callbacks: StreamCallbacks = {
|
||||
onContentChunk: (content: string) => {
|
||||
@@ -278,11 +407,30 @@ const createStreamCallbacks = (): StreamCallbacksWithState => {
|
||||
"stream",
|
||||
`Chunk #${chunkCount}: "${content.substring(0, 30)}${content.length > 30 ? "..." : ""}"`,
|
||||
);
|
||||
appStore.appendStreamContent(content);
|
||||
|
||||
// Feed through the thinking parser — only append visible content.
|
||||
// <thinking>…</thinking> blocks are stripped and emitted separately.
|
||||
const result = thinkingParser.feed(content);
|
||||
if (result.visible) {
|
||||
// If the previous streaming segment was finalized (e.g. before a tool call),
|
||||
// start a new one so post-tool text appears after tool output logs.
|
||||
if (!currentSegmentHasContent && !appStore.getState().streamingLog.isStreaming) {
|
||||
appStore.startStreaming();
|
||||
addDebugLog("stream", "Started new streaming segment for post-tool content");
|
||||
}
|
||||
appStore.appendStreamContent(result.visible);
|
||||
currentSegmentHasContent = true;
|
||||
}
|
||||
emitThinking(result.thinking);
|
||||
},
|
||||
|
||||
onToolCallStart: (toolCall) => {
|
||||
addDebugLog("tool", `Tool start: ${toolCall.name} (${toolCall.id})`);
|
||||
|
||||
// Finalize accumulated streaming text so it stays above tool output
|
||||
// and the post-tool summary will appear below.
|
||||
finalizeCurrentSegment();
|
||||
|
||||
appStore.setCurrentToolCall({
|
||||
id: toolCall.id,
|
||||
name: toolCall.name,
|
||||
@@ -308,7 +456,28 @@ const createStreamCallbacks = (): StreamCallbacksWithState => {
|
||||
});
|
||||
},
|
||||
|
||||
onUsage: (usage) => {
|
||||
receivedUsage = true;
|
||||
addDebugLog(
|
||||
"api",
|
||||
`Token usage: prompt=${usage.promptTokens}, completion=${usage.completionTokens}`,
|
||||
);
|
||||
appStore.addTokens(usage.promptTokens, usage.completionTokens);
|
||||
},
|
||||
|
||||
onComplete: () => {
|
||||
// Flush any remaining buffered content from the thinking parser
|
||||
const flushed = thinkingParser.flush();
|
||||
if (flushed.visible) {
|
||||
// Ensure a streaming log exists if we're flushing post-tool content
|
||||
if (!currentSegmentHasContent && !appStore.getState().streamingLog.isStreaming) {
|
||||
appStore.startStreaming();
|
||||
}
|
||||
appStore.appendStreamContent(flushed.visible);
|
||||
currentSegmentHasContent = true;
|
||||
}
|
||||
emitThinking(flushed.thinking);
|
||||
|
||||
// Note: Don't call completeStreaming() here!
|
||||
// The agent loop may have multiple iterations (tool calls + final response)
|
||||
// Streaming will be completed manually after the entire agent finishes
|
||||
@@ -320,6 +489,7 @@ const createStreamCallbacks = (): StreamCallbacksWithState => {
|
||||
|
||||
onError: (error: string) => {
|
||||
addDebugLog("error", `Stream error: ${error}`);
|
||||
thinkingParser.reset();
|
||||
appStore.cancelStreaming();
|
||||
appStore.addLog({
|
||||
type: "error",
|
||||
@@ -331,6 +501,7 @@ const createStreamCallbacks = (): StreamCallbacksWithState => {
|
||||
return {
|
||||
callbacks,
|
||||
hasReceivedContent: () => chunkCount > 0,
|
||||
hasReceivedUsage: () => receivedUsage,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -426,7 +597,10 @@ export const handleMessage = async (
|
||||
if (isApprovalMessage(message)) {
|
||||
approvePlan(plan.id, message);
|
||||
startPlanExecution(plan.id);
|
||||
callbacks.onLog("system", `Plan "${plan.title}" approved. Proceeding with implementation.`);
|
||||
callbacks.onLog(
|
||||
"system",
|
||||
`Plan "${plan.title}" approved. Proceeding with implementation.`,
|
||||
);
|
||||
addDebugLog("state", `Plan ${plan.id} approved by user`);
|
||||
|
||||
// Continue with agent execution - the agent will see the approved status
|
||||
@@ -438,7 +612,10 @@ export const handleMessage = async (
|
||||
// Fall through to normal agent processing
|
||||
} else if (isRejectionMessage(message)) {
|
||||
rejectPlan(plan.id, message);
|
||||
callbacks.onLog("system", `Plan "${plan.title}" rejected. Please provide feedback or a new approach.`);
|
||||
callbacks.onLog(
|
||||
"system",
|
||||
`Plan "${plan.title}" rejected. Please provide feedback or a new approach.`,
|
||||
);
|
||||
addDebugLog("state", `Plan ${plan.id} rejected by user`);
|
||||
|
||||
// Add rejection to messages so agent can respond
|
||||
@@ -449,7 +626,10 @@ export const handleMessage = async (
|
||||
// Fall through to normal agent processing to get revised plan
|
||||
} else {
|
||||
// Neither approval nor rejection - treat as feedback/modification request
|
||||
callbacks.onLog("system", `Plan "${plan.title}" awaiting approval. Reply 'yes' to approve or 'no' to reject.`);
|
||||
callbacks.onLog(
|
||||
"system",
|
||||
`Plan "${plan.title}" awaiting approval. Reply 'yes' to approve or 'no' to reject.`,
|
||||
);
|
||||
|
||||
// Show the plan again with the feedback
|
||||
const planDisplay = formatPlanForDisplay(plan);
|
||||
@@ -611,6 +791,90 @@ export const handleMessage = async (
|
||||
const { enrichedMessage, issues } =
|
||||
await enrichMessageWithIssues(processedMessage);
|
||||
|
||||
// Inline @mention subagent invocation (e.g. "Find all API endpoints @explore")
|
||||
try {
|
||||
const mentionRegex = /@([a-zA-Z_]+)/g;
|
||||
const mentionMap: Record<string, string> = {
|
||||
explore: "explore",
|
||||
general: "implement",
|
||||
plan: "plan",
|
||||
};
|
||||
|
||||
const mentions: string[] = [];
|
||||
let m: RegExpExecArray | null;
|
||||
while ((m = mentionRegex.exec(message))) {
|
||||
const key = m[1]?.toLowerCase();
|
||||
if (key && mentionMap[key]) mentions.push(key);
|
||||
}
|
||||
|
||||
if (mentions.length > 0) {
|
||||
// Clean message to use as task prompt (remove mentions)
|
||||
const cleaned = enrichedMessage.replace(/@[a-zA-Z_]+/g, "").trim();
|
||||
|
||||
// Lazy import task agent helpers (avoid circular deps)
|
||||
const { executeTaskAgent, getBackgroundAgentStatus } =
|
||||
await import("@/tools/task-agent/execute");
|
||||
const { v4: uuidv4 } = await import("uuid");
|
||||
|
||||
// Minimal tool context for invoking the task agent
|
||||
const toolCtx = {
|
||||
sessionId: uuidv4(),
|
||||
messageId: uuidv4(),
|
||||
workingDir: process.cwd(),
|
||||
abort: new AbortController(),
|
||||
autoApprove: true,
|
||||
onMetadata: () => {},
|
||||
} as any;
|
||||
|
||||
for (const key of mentions) {
|
||||
const agentType = mentionMap[key];
|
||||
try {
|
||||
const params = {
|
||||
agent_type: agentType,
|
||||
task: cleaned || message,
|
||||
run_in_background: true,
|
||||
} as any;
|
||||
|
||||
const startResult = await executeTaskAgent(params, toolCtx);
|
||||
|
||||
// Show started message in UI
|
||||
appStore.addLog({
|
||||
type: "system",
|
||||
content: `Started subagent @${key} (ID: ${startResult.metadata?.agentId ?? "?"}).`,
|
||||
});
|
||||
|
||||
// Poll briefly for completion and attach result if ready
|
||||
const agentId = startResult.metadata?.agentId as string | undefined;
|
||||
if (agentId) {
|
||||
const maxAttempts = 10;
|
||||
const interval = 300;
|
||||
for (let i = 0; i < maxAttempts; i++) {
|
||||
// eslint-disable-next-line no-await-in-loop
|
||||
const status = await getBackgroundAgentStatus(agentId);
|
||||
if (status && status.success && status.output) {
|
||||
// Attach assistant result to conversation
|
||||
appStore.addLog({ type: "assistant", content: status.output });
|
||||
addMessage("assistant", status.output);
|
||||
await saveSession();
|
||||
break;
|
||||
}
|
||||
// eslint-disable-next-line no-await-in-loop
|
||||
await new Promise((res) => setTimeout(res, interval));
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
appStore.addLog({
|
||||
type: "error",
|
||||
content: `Subagent @${key} failed to start: ${String(err)}`,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
// Non-fatal - don't block main flow on subagent helpers
|
||||
addDebugLog("error", `Subagent invocation error: ${String(err)}`);
|
||||
}
|
||||
|
||||
if (issues.length > 0) {
|
||||
callbacks.onLog(
|
||||
"system",
|
||||
@@ -623,7 +887,35 @@ export const handleMessage = async (
|
||||
|
||||
const userMessage = buildContextMessage(state, enrichedMessage);
|
||||
|
||||
state.messages.push({ role: "user", content: userMessage });
|
||||
// Build multimodal content if there are pasted images
|
||||
const { pastedImages } = appStore.getState();
|
||||
let messageContent: MessageContent = userMessage;
|
||||
|
||||
if (pastedImages.length > 0) {
|
||||
const parts: ContentPart[] = [
|
||||
{ type: "text", text: userMessage },
|
||||
];
|
||||
|
||||
for (const img of pastedImages) {
|
||||
parts.push({
|
||||
type: "image_url",
|
||||
image_url: {
|
||||
url: `data:${img.mediaType};base64,${img.data}`,
|
||||
detail: "auto",
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
messageContent = parts;
|
||||
addDebugLog(
|
||||
"info",
|
||||
`[images] Attached ${pastedImages.length} image(s) to user message`,
|
||||
);
|
||||
// Images are consumed; clear from store
|
||||
appStore.clearPastedImages();
|
||||
}
|
||||
|
||||
state.messages.push({ role: "user", content: messageContent });
|
||||
|
||||
clearSuggestions();
|
||||
|
||||
@@ -707,6 +999,37 @@ export const handleMessage = async (
|
||||
? state.model
|
||||
: getDefaultModel(effectiveProvider);
|
||||
|
||||
// Auto-detect and inject relevant skills based on the user prompt.
|
||||
// Skills are activated transparently and their instructions are injected
|
||||
// into the conversation as a system message so the agent benefits from
|
||||
// specialized knowledge (e.g., TypeScript, React, Security, etc.).
|
||||
try {
|
||||
const { injection, detected } =
|
||||
await buildSkillInjectionForPrompt(message);
|
||||
if (detected.length > 0 && injection) {
|
||||
const summary = getDetectedSkillsSummary(detected);
|
||||
addDebugLog("info", `[skills] ${summary}`);
|
||||
callbacks.onLog("system", summary);
|
||||
|
||||
// Inject skill context as a system message right before the user message
|
||||
// so the agent has specialized knowledge for this prompt.
|
||||
const insertIdx = Math.max(0, state.messages.length - 1);
|
||||
state.messages.splice(insertIdx, 0, {
|
||||
role: "system" as const,
|
||||
content: injection,
|
||||
});
|
||||
addDebugLog(
|
||||
"info",
|
||||
`[skills] Injected ${detected.length} skill(s) as system context`,
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
addDebugLog(
|
||||
"error",
|
||||
`Skill detection failed: ${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
}
|
||||
|
||||
// Start streaming UI
|
||||
addDebugLog(
|
||||
"state",
|
||||
@@ -731,8 +1054,10 @@ export const handleMessage = async (
|
||||
autoApprove: state.autoApprove,
|
||||
chatMode: isReadOnlyMode,
|
||||
onText: (text: string) => {
|
||||
// Note: Do NOT call appStore.appendStreamContent() here.
|
||||
// Streaming content is already handled by onContentChunk in streamState.callbacks.
|
||||
// Calling appendStreamContent from both onText and onContentChunk causes double content.
|
||||
addDebugLog("info", `onText callback: "${text.substring(0, 50)}..."`);
|
||||
appStore.appendStreamContent(text);
|
||||
},
|
||||
onToolCall: createToolCallHandler(callbacks, toolCallRef),
|
||||
onToolResult: createToolResultHandler(callbacks, toolCallRef),
|
||||
@@ -758,7 +1083,10 @@ export const handleMessage = async (
|
||||
onStepModeDisabled: () => {
|
||||
addDebugLog("state", "Step mode disabled");
|
||||
},
|
||||
onWaitingForStep: (toolName: string, _toolArgs: Record<string, unknown>) => {
|
||||
onWaitingForStep: (
|
||||
toolName: string,
|
||||
_toolArgs: Record<string, unknown>,
|
||||
) => {
|
||||
appStore.addLog({
|
||||
type: "system",
|
||||
content: `⏳ Step mode: Ready to execute ${toolName}. Press Enter to continue.`,
|
||||
@@ -766,14 +1094,20 @@ export const handleMessage = async (
|
||||
addDebugLog("state", `Waiting for step: ${toolName}`);
|
||||
},
|
||||
onAbort: (rollbackCount: number) => {
|
||||
addDebugLog("state", `Abort initiated, ${rollbackCount} actions to rollback`);
|
||||
addDebugLog(
|
||||
"state",
|
||||
`Abort initiated, ${rollbackCount} actions to rollback`,
|
||||
);
|
||||
},
|
||||
onRollback: (action: { type: string; description: string }) => {
|
||||
appStore.addLog({
|
||||
type: "system",
|
||||
content: `↩ Rolling back: ${action.description}`,
|
||||
});
|
||||
addDebugLog("state", `Rollback: ${action.type} - ${action.description}`);
|
||||
addDebugLog(
|
||||
"state",
|
||||
`Rollback: ${action.type} - ${action.description}`,
|
||||
);
|
||||
},
|
||||
onRollbackComplete: (actionsRolledBack: number) => {
|
||||
appStore.addLog({
|
||||
@@ -788,20 +1122,33 @@ export const handleMessage = async (
|
||||
// Store agent reference for abort capability
|
||||
currentAgent = agent;
|
||||
|
||||
try {
|
||||
addDebugLog(
|
||||
"api",
|
||||
`Agent.run() started with ${state.messages.length} messages`,
|
||||
);
|
||||
const result = await agent.run(state.messages);
|
||||
addDebugLog(
|
||||
"api",
|
||||
`Agent.run() completed: success=${result.success}, iterations=${result.iterations}`,
|
||||
);
|
||||
|
||||
/**
|
||||
* Process the result of an agent run: finalize streaming, show stop reason,
|
||||
* persist to session.
|
||||
*/
|
||||
const processAgentResult = async (
|
||||
result: Awaited<ReturnType<typeof agent.run>>,
|
||||
userMessage: string,
|
||||
): Promise<void> => {
|
||||
// Stop thinking timer
|
||||
appStore.stopThinking();
|
||||
|
||||
// If the stream didn't deliver API-reported usage data, estimate tokens
|
||||
// from message lengths so the context counter never stays stuck at 0.
|
||||
if (!streamState.hasReceivedUsage()) {
|
||||
const inputEstimate = Math.ceil(userMessage.length / 4);
|
||||
const outputEstimate = Math.ceil((result.finalResponse?.length ?? 0) / 4);
|
||||
// Add tool I/O overhead: each tool call/result adds tokens
|
||||
const toolOverhead = result.toolCalls.length * 150; // ~150 tokens per tool exchange
|
||||
if (inputEstimate > 0 || outputEstimate > 0) {
|
||||
appStore.addTokens(inputEstimate + toolOverhead, outputEstimate + toolOverhead);
|
||||
addDebugLog(
|
||||
"info",
|
||||
`Token estimate (no API usage): ~${inputEstimate + toolOverhead} in, ~${outputEstimate + toolOverhead} out`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (result.finalResponse) {
|
||||
addDebugLog(
|
||||
"info",
|
||||
@@ -812,7 +1159,7 @@ export const handleMessage = async (
|
||||
// Run audit if cascade mode with Ollama
|
||||
if (shouldAudit && effectiveProvider === "ollama") {
|
||||
const auditResult = await runAudit(
|
||||
message,
|
||||
userMessage,
|
||||
result.finalResponse,
|
||||
callbacks,
|
||||
);
|
||||
@@ -844,30 +1191,36 @@ export const handleMessage = async (
|
||||
content: finalResponse,
|
||||
});
|
||||
|
||||
// Check if streaming content was received - if not, add the response as a log
|
||||
// This handles cases where streaming didn't work or content was all in final response
|
||||
if (!streamState.hasReceivedContent() && finalResponse) {
|
||||
// Single source of truth: decide based on whether the provider
|
||||
// actually streamed visible content, not whether we asked for streaming.
|
||||
const streamedContent = streamState.hasReceivedContent();
|
||||
|
||||
if (streamedContent) {
|
||||
// Streaming delivered content — finalize the last streaming segment.
|
||||
addDebugLog("info", "Completing streaming with received content");
|
||||
if (appStore.getState().streamingLog.isStreaming) {
|
||||
appStore.completeStreaming();
|
||||
}
|
||||
} else if (finalResponse) {
|
||||
addDebugLog(
|
||||
"info",
|
||||
"No streaming content received, adding fallback log",
|
||||
);
|
||||
// Streaming didn't receive content, manually add the response
|
||||
appStore.cancelStreaming(); // Remove empty streaming log
|
||||
if (appStore.getState().streamingLog.isStreaming) {
|
||||
appStore.cancelStreaming();
|
||||
}
|
||||
appStore.addLog({
|
||||
type: "assistant",
|
||||
content: finalResponse,
|
||||
content: stripMarkdown(finalResponse),
|
||||
});
|
||||
} else {
|
||||
// Streaming received content - finalize the streaming log
|
||||
addDebugLog("info", "Completing streaming with received content");
|
||||
appStore.completeStreaming();
|
||||
}
|
||||
|
||||
addMessage("user", message);
|
||||
// Persist to session
|
||||
addMessage("user", userMessage);
|
||||
addMessage("assistant", finalResponse);
|
||||
await saveSession();
|
||||
|
||||
await processLearningsFromExchange(message, finalResponse, callbacks);
|
||||
await processLearningsFromExchange(userMessage, finalResponse, callbacks);
|
||||
|
||||
const suggestions = getPendingSuggestions();
|
||||
if (suggestions.length > 0) {
|
||||
@@ -875,6 +1228,130 @@ export const handleMessage = async (
|
||||
callbacks.onLog("system", formatted);
|
||||
}
|
||||
}
|
||||
|
||||
// Show agent stop reason to the user so they know why it ended
|
||||
const stopReason = result.stopReason ?? "completed";
|
||||
const toolCount = result.toolCalls.length;
|
||||
const iters = result.iterations;
|
||||
|
||||
if (stopReason === "max_iterations") {
|
||||
appStore.addLog({
|
||||
type: "system",
|
||||
content: `Agent stopped: reached max iterations (${iters}). ` +
|
||||
`${toolCount} tool call(s) completed. ` +
|
||||
`Send another message to continue where it left off.`,
|
||||
});
|
||||
} else if (stopReason === "consecutive_errors") {
|
||||
appStore.addLog({
|
||||
type: "error",
|
||||
content: `Agent stopped: repeated tool failures. ${toolCount} tool call(s) attempted across ${iters} iteration(s).`,
|
||||
});
|
||||
} else if (stopReason === "aborted") {
|
||||
appStore.addLog({
|
||||
type: "system",
|
||||
content: `Agent aborted by user after ${iters} iteration(s) and ${toolCount} tool call(s).`,
|
||||
});
|
||||
} else if (stopReason === "error") {
|
||||
appStore.addLog({
|
||||
type: "error",
|
||||
content: `Agent encountered an error after ${iters} iteration(s) and ${toolCount} tool call(s).`,
|
||||
});
|
||||
} else if (stopReason === "completed" && toolCount > 0) {
|
||||
// Only show a summary for non-trivial agent runs (with tool calls)
|
||||
appStore.addLog({
|
||||
type: "system",
|
||||
content: `Agent completed: ${toolCount} tool call(s) in ${iters} iteration(s).`,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
try {
|
||||
addDebugLog(
|
||||
"api",
|
||||
`Agent.run() started with ${state.messages.length} messages`,
|
||||
);
|
||||
let result = await agent.run(state.messages);
|
||||
addDebugLog(
|
||||
"api",
|
||||
`Agent.run() completed: success=${result.success}, iterations=${result.iterations}, stopReason=${result.stopReason}`,
|
||||
);
|
||||
|
||||
await processAgentResult(result, message);
|
||||
|
||||
// After agent finishes, check for pending plans and auto-continue on approval
|
||||
let continueAfterPlan = true;
|
||||
while (continueAfterPlan) {
|
||||
continueAfterPlan = false;
|
||||
|
||||
const newPendingPlans = getActivePlans().filter(
|
||||
(p) => p.status === "pending",
|
||||
);
|
||||
if (newPendingPlans.length === 0) break;
|
||||
|
||||
const plan = newPendingPlans[0];
|
||||
const planContent = formatPlanForDisplay(plan);
|
||||
addDebugLog("state", `Showing plan approval modal: ${plan.id}`);
|
||||
|
||||
const approved = await new Promise<boolean>((resolve) => {
|
||||
appStore.setMode("plan_approval");
|
||||
appStore.setPlanApprovalPrompt({
|
||||
id: uuidv4(),
|
||||
planTitle: plan.title,
|
||||
planSummary: plan.summary,
|
||||
planContent,
|
||||
resolve: (response) => {
|
||||
appStore.setPlanApprovalPrompt(null);
|
||||
|
||||
if (response.approved) {
|
||||
approvePlan(plan.id, response.editMode);
|
||||
startPlanExecution(plan.id);
|
||||
addDebugLog("state", `Plan ${plan.id} approved via modal`);
|
||||
appStore.addLog({
|
||||
type: "system",
|
||||
content: `Plan "${plan.title}" approved. Continuing implementation...`,
|
||||
});
|
||||
|
||||
state.messages.push({
|
||||
role: "user",
|
||||
content: `The user approved the plan "${plan.title}". ` +
|
||||
`Proceed with the full implementation — complete ALL steps in the plan. ` +
|
||||
`Do not stop until every step is done or you need further user input.`,
|
||||
});
|
||||
} else {
|
||||
rejectPlan(plan.id, response.feedback ?? "User cancelled");
|
||||
addDebugLog("state", `Plan ${plan.id} rejected via modal`);
|
||||
appStore.addLog({
|
||||
type: "system",
|
||||
content: `Plan "${plan.title}" cancelled.`,
|
||||
});
|
||||
}
|
||||
|
||||
resolve(response.approved);
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
// If the plan was approved, re-run the agent loop so it continues working
|
||||
if (approved) {
|
||||
addDebugLog("api", "Re-running agent after plan approval");
|
||||
appStore.setMode("thinking");
|
||||
appStore.startThinking();
|
||||
appStore.startStreaming();
|
||||
|
||||
result = await agent.run(state.messages);
|
||||
addDebugLog(
|
||||
"api",
|
||||
`Agent.run() (post-plan) completed: success=${result.success}, iterations=${result.iterations}, stopReason=${result.stopReason}`,
|
||||
);
|
||||
|
||||
await processAgentResult(result, message);
|
||||
|
||||
// Loop again to check for new pending plans from this agent run
|
||||
continueAfterPlan = true;
|
||||
} else {
|
||||
appStore.setMode("idle");
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
appStore.cancelStreaming();
|
||||
appStore.stopThinking();
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
*/
|
||||
|
||||
import { MODEL_MESSAGES } from "@constants/chat-service";
|
||||
import { getModelContextSize } from "@constants/copilot";
|
||||
import { getConfig } from "@services/core/config";
|
||||
import { getProvider } from "@providers/core/registry";
|
||||
import {
|
||||
@@ -35,6 +36,19 @@ export const loadModels = async (
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Resolve the context window size for a given provider + model.
|
||||
* Uses the Copilot context-size table when available, otherwise
|
||||
* falls back to DEFAULT_CONTEXT_SIZE.
|
||||
*/
|
||||
const resolveContextMaxTokens = (
|
||||
provider: ProviderName,
|
||||
modelId: string | undefined,
|
||||
): number => {
|
||||
const effectiveModel = modelId ?? getDefaultModel(provider);
|
||||
return getModelContextSize(effectiveModel).input;
|
||||
};
|
||||
|
||||
export const handleModelSelect = async (
|
||||
state: ChatServiceState,
|
||||
model: string,
|
||||
@@ -49,6 +63,12 @@ export const handleModelSelect = async (
|
||||
}
|
||||
appStore.setModel(model);
|
||||
|
||||
// Update context max tokens for the newly selected model
|
||||
const effectiveModel = model === "auto" ? undefined : model;
|
||||
appStore.setContextMaxTokens(
|
||||
resolveContextMaxTokens(state.provider, effectiveModel),
|
||||
);
|
||||
|
||||
const config = await getConfig();
|
||||
config.set("model", model === "auto" ? undefined : model);
|
||||
await config.save();
|
||||
|
||||
@@ -11,6 +11,7 @@ import { v4 as uuidv4 } from "uuid";
|
||||
import type { PlanApprovalPromptResponse } from "@/types/tui";
|
||||
import type { ImplementationPlan } from "@/types/plan-mode";
|
||||
import { appStore } from "@tui-solid/context/app";
|
||||
import { formatPlanForDisplay } from "@services/plan-mode/plan-service";
|
||||
|
||||
export interface PlanApprovalHandlerRequest {
|
||||
plan: ImplementationPlan;
|
||||
@@ -43,6 +44,7 @@ export const createPlanApprovalHandler = (): PlanApprovalHandler => {
|
||||
id: uuidv4(),
|
||||
planTitle: request.plan.title,
|
||||
planSummary: request.plan.summary,
|
||||
planContent: formatPlanForDisplay(request.plan),
|
||||
planFilePath: request.planFilePath,
|
||||
resolve: (response) => {
|
||||
appStore.setPlanApprovalPrompt(null);
|
||||
|
||||
@@ -1,289 +0,0 @@
|
||||
/**
|
||||
* Streaming Chat TUI Integration
|
||||
*
|
||||
* Connects the streaming agent loop to the TUI store for real-time updates.
|
||||
*/
|
||||
|
||||
import type { Message } from "@/types/providers";
|
||||
import type { AgentOptions } from "@interfaces/AgentOptions";
|
||||
import type { AgentResult } from "@interfaces/AgentResult";
|
||||
import type { StreamingChatOptions } from "@interfaces/StreamingChatOptions";
|
||||
import type {
|
||||
StreamCallbacks,
|
||||
PartialToolCall,
|
||||
ModelSwitchInfo,
|
||||
} from "@/types/streaming";
|
||||
import type { ToolCall, ToolResult } from "@/types/tools";
|
||||
import { createStreamingAgent } from "@services/agent-stream";
|
||||
import { createThinkingParser } from "@services/reasoning/thinking-parser";
|
||||
import { appStore } from "@tui-solid/context/app";
|
||||
|
||||
// Re-export for convenience
|
||||
export type { StreamingChatOptions } from "@interfaces/StreamingChatOptions";
|
||||
|
||||
// =============================================================================
|
||||
// TUI Streaming Callbacks
|
||||
// =============================================================================
|
||||
|
||||
/**
 * Build StreamCallbacks that mirror streaming events into the TUI app store.
 *
 * A single thinking-parser instance is shared across all callbacks: each raw
 * content chunk is fed through it, and only the parser's `visible` portion is
 * streamed to the UI while `thinking` text is logged separately. Returns the
 * callbacks plus a resetParser() hook to clear parser state between sessions.
 */
const createTUIStreamCallbacks = (
  options?: Partial<StreamingChatOptions>,
): { callbacks: StreamCallbacks; resetParser: () => void } => {
  const parser = createThinkingParser();

  // Log non-empty thinking output as a "thinking" entry; no-op on null/empty.
  const emitThinking = (thinking: string | null): void => {
    if (!thinking) return;
    appStore.addLog({
      type: "thinking",
      content: thinking,
    });
  };

  const callbacks: StreamCallbacks = {
    // Split each chunk into visible text (streamed) and thinking text (logged).
    onContentChunk: (content: string) => {
      const result = parser.feed(content);
      if (result.visible) {
        appStore.appendStreamContent(result.visible);
      }
      emitThinking(result.thinking);
    },

    // A tool call has started streaming its arguments: show it as pending.
    onToolCallStart: (toolCall: PartialToolCall) => {
      appStore.setCurrentToolCall({
        id: toolCall.id,
        name: toolCall.name,
        description: `Calling ${toolCall.name}...`,
        status: "pending",
      });
    },

    // Arguments fully received: mark the tool call as running.
    onToolCallComplete: (toolCall: ToolCall) => {
      appStore.updateToolCall({
        id: toolCall.id,
        name: toolCall.name,
        status: "running",
      });
    },

    // Surface mid-run model switches to the user, then forward to the caller.
    onModelSwitch: (info: ModelSwitchInfo) => {
      appStore.addLog({
        type: "system",
        content: `Model switched: ${info.from} → ${info.to} (${info.reason})`,
      });
      options?.onModelSwitch?.(info);
    },

    // Stream finished: flush any text still buffered in the parser before
    // marking the stream complete.
    onComplete: () => {
      const flushed = parser.flush();
      if (flushed.visible) {
        appStore.appendStreamContent(flushed.visible);
      }
      emitThinking(flushed.thinking);
      appStore.completeStreaming();
    },

    // Stream failed: drop parser state, cancel the UI stream, log the error.
    onError: (error: string) => {
      parser.reset();
      appStore.cancelStreaming();
      appStore.addLog({
        type: "error",
        content: error,
      });
    },
  };

  return { callbacks, resetParser: () => parser.reset() };
};
|
||||
|
||||
// =============================================================================
|
||||
// Agent Options with TUI Integration
|
||||
// =============================================================================
|
||||
|
||||
/**
 * Wrap caller-supplied StreamingChatOptions with handlers that also drive the
 * TUI app store: mode changes, current-tool-call state, and log entries.
 * Each wrapped handler forwards to the caller's original handler (if any)
 * after updating the TUI.
 */
const createAgentOptionsWithTUI = (
  options: StreamingChatOptions,
): AgentOptions => ({
  ...options,

  onText: (text: string) => {
    // Text is handled by streaming callbacks, but we may want to notify
    options.onText?.(text);
  },

  // A tool is about to execute: switch the TUI into tool-execution mode,
  // track it as the current tool call, and log the invocation.
  onToolCall: (toolCall: ToolCall) => {
    appStore.setMode("tool_execution");
    appStore.setCurrentToolCall({
      id: toolCall.id,
      name: toolCall.name,
      description: `Executing ${toolCall.name}...`,
      status: "running",
    });

    appStore.addLog({
      type: "tool",
      content: `${toolCall.name}`,
      metadata: {
        toolName: toolCall.name,
        toolStatus: "running",
        toolDescription: `Executing ${toolCall.name}`,
        toolArgs: toolCall.arguments,
      },
    });

    options.onToolCall?.(toolCall);
  },

  // A tool finished: record success/error on the tracked call, log the
  // output (or error), then clear the current call and return to thinking.
  onToolResult: (toolCallId: string, result: ToolResult) => {
    appStore.updateToolCall({
      status: result.success ? "success" : "error",
      result: result.output,
      error: result.error,
    });

    appStore.addLog({
      type: "tool",
      content: result.output || result.error || "",
      metadata: {
        // Tool name comes from the currently tracked call, set in onToolCall.
        toolName: appStore.getState().currentToolCall?.name,
        toolStatus: result.success ? "success" : "error",
        toolDescription: result.title,
      },
    });

    appStore.setCurrentToolCall(null);
    appStore.setMode("thinking");

    options.onToolResult?.(toolCallId, result);
  },

  // Agent-level error: drop back to idle and log it.
  onError: (error: string) => {
    appStore.setMode("idle");
    appStore.addLog({
      type: "error",
      content: error,
    });
    options.onError?.(error);
  },

  // Non-fatal warning: log as a system message.
  onWarning: (warning: string) => {
    appStore.addLog({
      type: "system",
      content: warning,
    });
    options.onWarning?.(warning);
  },
});
|
||||
|
||||
// =============================================================================
|
||||
// Main API
|
||||
// =============================================================================
|
||||
|
||||
/**
|
||||
* Run a streaming chat session with TUI integration
|
||||
*/
|
||||
export const runStreamingChat = async (
|
||||
messages: Message[],
|
||||
options: StreamingChatOptions,
|
||||
): Promise<AgentResult> => {
|
||||
// Set up TUI state
|
||||
appStore.setMode("thinking");
|
||||
appStore.startThinking();
|
||||
appStore.startStreaming();
|
||||
|
||||
// Create callbacks that update the TUI
|
||||
const { callbacks: streamCallbacks, resetParser } =
|
||||
createTUIStreamCallbacks(options);
|
||||
const agentOptions = createAgentOptionsWithTUI(options);
|
||||
|
||||
// Reset parser for fresh session
|
||||
resetParser();
|
||||
|
||||
// Create and run the streaming agent
|
||||
const agent = createStreamingAgent(
|
||||
process.cwd(),
|
||||
agentOptions,
|
||||
streamCallbacks,
|
||||
);
|
||||
|
||||
try {
|
||||
const result = await agent.run(messages);
|
||||
|
||||
appStore.stopThinking();
|
||||
appStore.setMode("idle");
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
appStore.cancelStreaming();
|
||||
appStore.stopThinking();
|
||||
appStore.setMode("idle");
|
||||
|
||||
const errorMessage = error instanceof Error ? error.message : String(error);
|
||||
appStore.addLog({
|
||||
type: "error",
|
||||
content: errorMessage,
|
||||
});
|
||||
|
||||
return {
|
||||
success: false,
|
||||
finalResponse: errorMessage,
|
||||
iterations: 0,
|
||||
toolCalls: [],
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a streaming chat instance with stop capability
|
||||
*/
|
||||
export const createStreamingChat = (
|
||||
options: StreamingChatOptions,
|
||||
): {
|
||||
run: (messages: Message[]) => Promise<AgentResult>;
|
||||
stop: () => void;
|
||||
} => {
|
||||
const { callbacks: streamCallbacks, resetParser } =
|
||||
createTUIStreamCallbacks(options);
|
||||
const agentOptions = createAgentOptionsWithTUI(options);
|
||||
|
||||
const agent = createStreamingAgent(
|
||||
process.cwd(),
|
||||
agentOptions,
|
||||
streamCallbacks,
|
||||
);
|
||||
|
||||
return {
|
||||
run: async (messages: Message[]) => {
|
||||
resetParser();
|
||||
appStore.setMode("thinking");
|
||||
appStore.startThinking();
|
||||
appStore.startStreaming();
|
||||
|
||||
try {
|
||||
const result = await agent.run(messages);
|
||||
|
||||
appStore.stopThinking();
|
||||
appStore.setMode("idle");
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
appStore.cancelStreaming();
|
||||
appStore.stopThinking();
|
||||
appStore.setMode("idle");
|
||||
|
||||
const errorMessage =
|
||||
error instanceof Error ? error.message : String(error);
|
||||
return {
|
||||
success: false,
|
||||
finalResponse: errorMessage,
|
||||
iterations: 0,
|
||||
toolCalls: [],
|
||||
};
|
||||
}
|
||||
},
|
||||
|
||||
stop: () => {
|
||||
agent.stop();
|
||||
appStore.cancelStreaming();
|
||||
appStore.stopThinking();
|
||||
appStore.setMode("idle");
|
||||
},
|
||||
};
|
||||
};
|
||||
@@ -204,47 +204,86 @@ export const matchesPathPattern = (
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if a Bash command is allowed
|
||||
* Split a shell command into individual sub-commands on chaining operators.
|
||||
* Handles &&, ||, ;, and | (pipe).
|
||||
* This prevents a pattern like Bash(cd:*) from silently approving
|
||||
* "cd /safe && rm -rf /dangerous".
|
||||
*/
|
||||
const splitChainedCommands = (command: string): string[] => {
|
||||
// Split on shell chaining operators, but not inside quoted strings.
|
||||
// Simple heuristic: split on &&, ||, ;, | (not ||) that are not inside quotes.
|
||||
const parts: string[] = [];
|
||||
let current = "";
|
||||
let inSingle = false;
|
||||
let inDouble = false;
|
||||
|
||||
for (let i = 0; i < command.length; i++) {
|
||||
const ch = command[i];
|
||||
const next = command[i + 1];
|
||||
|
||||
// Track quoting
|
||||
if (ch === "'" && !inDouble) { inSingle = !inSingle; current += ch; continue; }
|
||||
if (ch === '"' && !inSingle) { inDouble = !inDouble; current += ch; continue; }
|
||||
if (inSingle || inDouble) { current += ch; continue; }
|
||||
|
||||
// Check for operators
|
||||
if (ch === "&" && next === "&") { parts.push(current); current = ""; i++; continue; }
|
||||
if (ch === "|" && next === "|") { parts.push(current); current = ""; i++; continue; }
|
||||
if (ch === ";") { parts.push(current); current = ""; continue; }
|
||||
if (ch === "|") { parts.push(current); current = ""; continue; }
|
||||
|
||||
current += ch;
|
||||
}
|
||||
|
||||
if (current.trim()) parts.push(current);
|
||||
return parts.map((p) => p.trim()).filter(Boolean);
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if a Bash command is allowed.
|
||||
* For chained commands (&&, ||, ;, |), EVERY sub-command must be allowed.
|
||||
*/
|
||||
export const isBashAllowed = (command: string): boolean => {
|
||||
const subCommands = splitChainedCommands(command);
|
||||
|
||||
const allPatterns = [
|
||||
...sessionAllowPatterns,
|
||||
...localAllowPatterns,
|
||||
...globalAllowPatterns,
|
||||
];
|
||||
|
||||
for (const patternStr of allPatterns) {
|
||||
const pattern = parsePattern(patternStr);
|
||||
if (
|
||||
pattern &&
|
||||
pattern.tool === "Bash" &&
|
||||
matchesBashPattern(command, pattern)
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
// Every sub-command must match at least one allow pattern
|
||||
return subCommands.every((subCmd) =>
|
||||
allPatterns.some((patternStr) => {
|
||||
const pattern = parsePattern(patternStr);
|
||||
return (
|
||||
pattern &&
|
||||
pattern.tool === "Bash" &&
|
||||
matchesBashPattern(subCmd, pattern)
|
||||
);
|
||||
}),
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if a Bash command is denied
|
||||
* Check if a Bash command is denied.
|
||||
* For chained commands, if ANY sub-command is denied, the whole command is denied.
|
||||
*/
|
||||
export const isBashDenied = (command: string): boolean => {
|
||||
const subCommands = splitChainedCommands(command);
|
||||
const denyPatterns = [...localDenyPatterns, ...globalDenyPatterns];
|
||||
|
||||
for (const patternStr of denyPatterns) {
|
||||
const pattern = parsePattern(patternStr);
|
||||
if (
|
||||
pattern &&
|
||||
pattern.tool === "Bash" &&
|
||||
matchesBashPattern(command, pattern)
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
// If any sub-command matches a deny pattern, deny the whole command
|
||||
return subCommands.some((subCmd) =>
|
||||
denyPatterns.some((patternStr) => {
|
||||
const pattern = parsePattern(patternStr);
|
||||
return (
|
||||
pattern &&
|
||||
pattern.tool === "Bash" &&
|
||||
matchesBashPattern(subCmd, pattern)
|
||||
);
|
||||
}),
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -273,9 +312,9 @@ export const isFileOpAllowed = (
|
||||
};
|
||||
|
||||
/**
|
||||
* Generate a pattern for the given command
|
||||
* Generate a pattern for a single (non-chained) command
|
||||
*/
|
||||
export const generateBashPattern = (command: string): string => {
|
||||
const generateSingleBashPattern = (command: string): string => {
|
||||
const parts = command.trim().split(/\s+/);
|
||||
|
||||
if (parts.length === 0) return `Bash(${command}:*)`;
|
||||
@@ -290,6 +329,33 @@ export const generateBashPattern = (command: string): string => {
|
||||
return `Bash(${firstWord}:*)`;
|
||||
};
|
||||
|
||||
/**
|
||||
* Generate patterns for the given command.
|
||||
* For chained commands (&&, ||, ;, |), returns one pattern per sub-command.
|
||||
* This prevents "Bash(cd:*)" from blanket-approving everything chained after cd.
|
||||
*/
|
||||
export const generateBashPattern = (command: string): string => {
|
||||
const subCommands = splitChainedCommands(command);
|
||||
|
||||
if (subCommands.length <= 1) {
|
||||
return generateSingleBashPattern(command);
|
||||
}
|
||||
|
||||
// For chained commands, return all unique patterns joined so the user can see them
|
||||
const patterns = [
|
||||
...new Set(subCommands.map(generateSingleBashPattern)),
|
||||
];
|
||||
return patterns.join(", ");
|
||||
};
|
||||
|
||||
/**
|
||||
* Generate individual patterns for a command (used for storing)
|
||||
*/
|
||||
export const generateBashPatterns = (command: string): string[] => {
|
||||
const subCommands = splitChainedCommands(command);
|
||||
return [...new Set(subCommands.map(generateSingleBashPattern))];
|
||||
};
|
||||
|
||||
/**
|
||||
* Add a pattern to session allow list
|
||||
*/
|
||||
@@ -385,21 +451,23 @@ export const clearSessionPatterns = (): void => {
|
||||
};
|
||||
|
||||
/**
|
||||
* Handle permission scope
|
||||
* Handle permission scope — stores one or more patterns
|
||||
*/
|
||||
const handlePermissionScope = async (
|
||||
scope: string,
|
||||
pattern: string,
|
||||
patterns: string[],
|
||||
): Promise<void> => {
|
||||
const scopeHandlers: Record<string, () => Promise<void> | void> = {
|
||||
session: () => addSessionPattern(pattern),
|
||||
local: () => addLocalPattern(pattern),
|
||||
global: () => addGlobalPattern(pattern),
|
||||
};
|
||||
for (const pattern of patterns) {
|
||||
const scopeHandlers: Record<string, () => Promise<void> | void> = {
|
||||
session: () => addSessionPattern(pattern),
|
||||
local: () => addLocalPattern(pattern),
|
||||
global: () => addGlobalPattern(pattern),
|
||||
};
|
||||
|
||||
const handler = scopeHandlers[scope];
|
||||
if (handler) {
|
||||
await handler();
|
||||
const handler = scopeHandlers[scope];
|
||||
if (handler) {
|
||||
await handler();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -419,6 +487,7 @@ export const promptBashPermission = async (
|
||||
}
|
||||
|
||||
const suggestedPattern = generateBashPattern(command);
|
||||
const patterns = generateBashPatterns(command);
|
||||
|
||||
// Use custom handler if set (TUI mode)
|
||||
if (permissionHandler) {
|
||||
@@ -430,7 +499,7 @@ export const promptBashPermission = async (
|
||||
});
|
||||
|
||||
if (response.allowed && response.scope) {
|
||||
await handlePermissionScope(response.scope, suggestedPattern);
|
||||
await handlePermissionScope(response.scope, patterns);
|
||||
}
|
||||
|
||||
return {
|
||||
@@ -468,55 +537,61 @@ export const promptBashPermission = async (
|
||||
process.stdin.removeListener("data", handleInput);
|
||||
process.stdin.setRawMode?.(false);
|
||||
|
||||
const addAllPatterns = async (
|
||||
addFn: (p: string) => void | Promise<void>,
|
||||
): Promise<void> => {
|
||||
for (const p of patterns) await addFn(p);
|
||||
};
|
||||
|
||||
const responseMap: Record<string, () => Promise<void>> = {
|
||||
y: async () => resolve({ allowed: true }),
|
||||
yes: async () => resolve({ allowed: true }),
|
||||
s: async () => {
|
||||
addSessionPattern(suggestedPattern);
|
||||
await addAllPatterns(addSessionPattern);
|
||||
console.log(
|
||||
chalk.blue(`\n✓ Added session pattern: ${suggestedPattern}`),
|
||||
chalk.blue(`\n✓ Added session patterns: ${suggestedPattern}`),
|
||||
);
|
||||
resolve({ allowed: true, remember: "session" });
|
||||
},
|
||||
session: async () => {
|
||||
addSessionPattern(suggestedPattern);
|
||||
await addAllPatterns(addSessionPattern);
|
||||
console.log(
|
||||
chalk.blue(`\n✓ Added session pattern: ${suggestedPattern}`),
|
||||
chalk.blue(`\n✓ Added session patterns: ${suggestedPattern}`),
|
||||
);
|
||||
resolve({ allowed: true, remember: "session" });
|
||||
},
|
||||
l: async () => {
|
||||
await addLocalPattern(suggestedPattern);
|
||||
await addAllPatterns(addLocalPattern);
|
||||
console.log(
|
||||
chalk.cyan(`\n✓ Added project pattern: ${suggestedPattern}`),
|
||||
chalk.cyan(`\n✓ Added project patterns: ${suggestedPattern}`),
|
||||
);
|
||||
resolve({ allowed: true, remember: "local" });
|
||||
},
|
||||
local: async () => {
|
||||
await addLocalPattern(suggestedPattern);
|
||||
await addAllPatterns(addLocalPattern);
|
||||
console.log(
|
||||
chalk.cyan(`\n✓ Added project pattern: ${suggestedPattern}`),
|
||||
chalk.cyan(`\n✓ Added project patterns: ${suggestedPattern}`),
|
||||
);
|
||||
resolve({ allowed: true, remember: "local" });
|
||||
},
|
||||
project: async () => {
|
||||
await addLocalPattern(suggestedPattern);
|
||||
await addAllPatterns(addLocalPattern);
|
||||
console.log(
|
||||
chalk.cyan(`\n✓ Added project pattern: ${suggestedPattern}`),
|
||||
chalk.cyan(`\n✓ Added project patterns: ${suggestedPattern}`),
|
||||
);
|
||||
resolve({ allowed: true, remember: "local" });
|
||||
},
|
||||
g: async () => {
|
||||
await addGlobalPattern(suggestedPattern);
|
||||
await addAllPatterns(addGlobalPattern);
|
||||
console.log(
|
||||
chalk.magenta(`\n✓ Added global pattern: ${suggestedPattern}`),
|
||||
chalk.magenta(`\n✓ Added global patterns: ${suggestedPattern}`),
|
||||
);
|
||||
resolve({ allowed: true, remember: "global" });
|
||||
},
|
||||
global: async () => {
|
||||
await addGlobalPattern(suggestedPattern);
|
||||
await addAllPatterns(addGlobalPattern);
|
||||
console.log(
|
||||
chalk.magenta(`\n✓ Added global pattern: ${suggestedPattern}`),
|
||||
chalk.magenta(`\n✓ Added global patterns: ${suggestedPattern}`),
|
||||
);
|
||||
resolve({ allowed: true, remember: "global" });
|
||||
},
|
||||
@@ -562,7 +637,7 @@ export const promptFilePermission = async (
|
||||
});
|
||||
|
||||
if (response.allowed && response.scope) {
|
||||
await handlePermissionScope(response.scope, suggestedPattern);
|
||||
await handlePermissionScope(response.scope, [suggestedPattern]);
|
||||
}
|
||||
|
||||
return {
|
||||
|
||||
@@ -22,12 +22,58 @@ export interface DangerCheckResult {
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a command matches any blocked pattern
|
||||
* Split a shell command into individual sub-commands on chaining operators.
|
||||
* Handles &&, ||, ;, and | (pipe). Respects quoted strings.
|
||||
*/
|
||||
const splitChainedCommands = (command: string): string[] => {
|
||||
const parts: string[] = [];
|
||||
let current = "";
|
||||
let inSingle = false;
|
||||
let inDouble = false;
|
||||
|
||||
for (let i = 0; i < command.length; i++) {
|
||||
const ch = command[i];
|
||||
const next = command[i + 1];
|
||||
|
||||
if (ch === "'" && !inDouble) { inSingle = !inSingle; current += ch; continue; }
|
||||
if (ch === '"' && !inSingle) { inDouble = !inDouble; current += ch; continue; }
|
||||
if (inSingle || inDouble) { current += ch; continue; }
|
||||
|
||||
if (ch === "&" && next === "&") { parts.push(current); current = ""; i++; continue; }
|
||||
if (ch === "|" && next === "|") { parts.push(current); current = ""; i++; continue; }
|
||||
if (ch === ";") { parts.push(current); current = ""; continue; }
|
||||
if (ch === "|") { parts.push(current); current = ""; continue; }
|
||||
|
||||
current += ch;
|
||||
}
|
||||
|
||||
if (current.trim()) parts.push(current);
|
||||
return parts.map((p) => p.trim()).filter(Boolean);
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if a command matches any blocked pattern.
|
||||
* For chained commands (&&, ||, ;, |), each sub-command is checked individually
|
||||
* to prevent dangerous commands hidden behind benign ones (e.g. cd /safe && rm -rf /).
|
||||
*/
|
||||
export const checkDangerousCommand = (command: string): DangerCheckResult => {
|
||||
// Normalize command for checking
|
||||
const normalizedCommand = command.trim();
|
||||
const subCommands = splitChainedCommands(command);
|
||||
|
||||
for (const subCmd of subCommands) {
|
||||
const normalized = subCmd.trim();
|
||||
for (const pattern of BLOCKED_PATTERNS) {
|
||||
if (pattern.pattern.test(normalized)) {
|
||||
return {
|
||||
blocked: true,
|
||||
pattern,
|
||||
message: formatBlockedMessage(pattern),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Also check the full command in case a pattern targets the chaining itself
|
||||
const normalizedCommand = command.trim();
|
||||
for (const pattern of BLOCKED_PATTERNS) {
|
||||
if (pattern.pattern.test(normalizedCommand)) {
|
||||
return {
|
||||
|
||||
364
src/services/external-agent-loader.ts
Normal file
364
src/services/external-agent-loader.ts
Normal file
@@ -0,0 +1,364 @@
|
||||
/**
|
||||
* External Agent Loader
|
||||
*
|
||||
* Loads agent definitions from .claude/, .github/, .codetyper/
|
||||
* directories in the project root. These agents are parsed from
|
||||
* their respective frontmatter+markdown format and converted to
|
||||
* SkillDefinition for unified handling.
|
||||
*/
|
||||
|
||||
import fs from "fs/promises";
|
||||
import { join, basename, extname } from "path";
|
||||
import {
|
||||
EXTERNAL_AGENT_DIRS,
|
||||
EXTERNAL_AGENT_FILES,
|
||||
SKILL_DEFAULTS,
|
||||
} from "@constants/skills";
|
||||
import type {
|
||||
SkillDefinition,
|
||||
SkillSource,
|
||||
ExternalAgentFile,
|
||||
ParsedExternalAgent,
|
||||
} from "@/types/skills";
|
||||
|
||||
// ============================================================================
|
||||
// File Discovery
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Check if a file is a recognized agent definition
|
||||
*/
|
||||
const isAgentFile = (filename: string): boolean => {
|
||||
const lower = filename.toLowerCase();
|
||||
const ext = extname(lower);
|
||||
|
||||
// Check known filenames
|
||||
if (EXTERNAL_AGENT_FILES.KNOWN_FILES.some((f) => lower === f.toLowerCase())) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check extensions for files in agent subdirectories
|
||||
return (EXTERNAL_AGENT_FILES.EXTENSIONS as readonly string[]).includes(ext);
|
||||
};
|
||||
|
||||
/**
 * Scan a directory for agent definition files (non-recursive for top level).
 *
 * Top-level files are accepted when isAgentFile() recognizes them; entries
 * that are recognized subdirectories (EXTERNAL_AGENT_FILES.SUBDIRS) are
 * scanned one level deeper via scanSubdirectory(). Missing directories and
 * unreadable files are skipped silently — this loader is best-effort.
 *
 * @param dir - Absolute path of the directory to scan.
 * @param source - Skill source tag recorded on every discovered file.
 * @returns Discovered agent files with their contents already read.
 */
const scanDirectory = async (
  dir: string,
  source: SkillSource,
): Promise<ExternalAgentFile[]> => {
  const files: ExternalAgentFile[] = [];

  // Bail out early (empty result) when the directory does not exist.
  try {
    await fs.access(dir);
  } catch {
    return files; // Directory doesn't exist
  }

  try {
    const entries = await fs.readdir(dir, { withFileTypes: true });

    for (const entry of entries) {
      const fullPath = join(dir, entry.name);

      if (entry.isFile() && isAgentFile(entry.name)) {
        try {
          const content = await fs.readFile(fullPath, "utf-8");
          files.push({
            relativePath: entry.name,
            absolutePath: fullPath,
            source,
            content,
          });
        } catch {
          // Skip unreadable files
        }
      } else if (
        entry.isDirectory() &&
        (EXTERNAL_AGENT_FILES.SUBDIRS as readonly string[]).includes(entry.name.toLowerCase())
      ) {
        // Scan recognized subdirectories
        const subFiles = await scanSubdirectory(fullPath, source);
        files.push(...subFiles);
      }
    }
  } catch {
    // Directory not accessible
  }

  return files;
};
|
||||
|
||||
/**
|
||||
* Scan a subdirectory for agent files
|
||||
*/
|
||||
const scanSubdirectory = async (
|
||||
dir: string,
|
||||
source: SkillSource,
|
||||
): Promise<ExternalAgentFile[]> => {
|
||||
const files: ExternalAgentFile[] = [];
|
||||
|
||||
try {
|
||||
const entries = await fs.readdir(dir, { withFileTypes: true });
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.isFile()) continue;
|
||||
|
||||
const ext = extname(entry.name).toLowerCase();
|
||||
if (!(EXTERNAL_AGENT_FILES.EXTENSIONS as readonly string[]).includes(ext)) continue;
|
||||
|
||||
const fullPath = join(dir, entry.name);
|
||||
|
||||
try {
|
||||
const content = await fs.readFile(fullPath, "utf-8");
|
||||
files.push({
|
||||
relativePath: join(basename(dir), entry.name),
|
||||
absolutePath: fullPath,
|
||||
source,
|
||||
content,
|
||||
});
|
||||
} catch {
|
||||
// Skip unreadable files
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Subdirectory not accessible
|
||||
}
|
||||
|
||||
return files;
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Parsing
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Parse the frontmatter from an external agent file.
|
||||
* Supports the standard --- delimited YAML-like frontmatter.
|
||||
*/
|
||||
const parseFrontmatter = (
|
||||
content: string,
|
||||
): { frontmatter: Record<string, unknown>; body: string } => {
|
||||
const lines = content.split("\n");
|
||||
|
||||
if (lines[0]?.trim() !== "---") {
|
||||
// No frontmatter — the entire content is the body
|
||||
return { frontmatter: {}, body: content.trim() };
|
||||
}
|
||||
|
||||
let endIndex = -1;
|
||||
for (let i = 1; i < lines.length; i++) {
|
||||
if (lines[i]?.trim() === "---") {
|
||||
endIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (endIndex === -1) {
|
||||
return { frontmatter: {}, body: content.trim() };
|
||||
}
|
||||
|
||||
const fmLines = lines.slice(1, endIndex);
|
||||
const body = lines
|
||||
.slice(endIndex + 1)
|
||||
.join("\n")
|
||||
.trim();
|
||||
|
||||
// Simple YAML-like parsing
|
||||
const fm: Record<string, unknown> = {};
|
||||
let currentKey: string | null = null;
|
||||
let currentArray: string[] | null = null;
|
||||
|
||||
for (const line of fmLines) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed || trimmed.startsWith("#")) continue;
|
||||
|
||||
// Array item
|
||||
if (trimmed.startsWith("- ") && currentKey) {
|
||||
if (!currentArray) currentArray = [];
|
||||
const value = trimmed.slice(2).trim().replace(/^["']|["']$/g, "");
|
||||
currentArray.push(value);
|
||||
fm[currentKey] = currentArray;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Key-value pair
|
||||
const colonIdx = trimmed.indexOf(":");
|
||||
if (colonIdx > 0) {
|
||||
if (currentArray && currentKey) {
|
||||
fm[currentKey] = currentArray;
|
||||
}
|
||||
currentArray = null;
|
||||
|
||||
currentKey = trimmed.slice(0, colonIdx).trim();
|
||||
const rawValue = trimmed.slice(colonIdx + 1).trim();
|
||||
|
||||
if (!rawValue) continue; // Empty → might be array header
|
||||
|
||||
// Inline array: [a, b, c]
|
||||
if (rawValue.startsWith("[") && rawValue.endsWith("]")) {
|
||||
const items = rawValue
|
||||
.slice(1, -1)
|
||||
.split(",")
|
||||
.map((s) => s.trim().replace(/^["']|["']$/g, ""))
|
||||
.filter(Boolean);
|
||||
fm[currentKey] = items;
|
||||
} else {
|
||||
fm[currentKey] = rawValue.replace(/^["']|["']$/g, "");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { frontmatter: fm, body };
|
||||
};
|
||||
|
||||
/**
|
||||
* Parse an external agent file into a structured definition
|
||||
*/
|
||||
const parseAgentFile = (file: ExternalAgentFile): ParsedExternalAgent => {
|
||||
const { frontmatter, body } = parseFrontmatter(file.content);
|
||||
|
||||
// Derive ID from filename (strip extension, lowercase, kebab-case)
|
||||
const nameWithoutExt = basename(file.relativePath, extname(file.relativePath));
|
||||
const id = `ext-${file.source.replace("external-", "")}-${nameWithoutExt
|
||||
.toLowerCase()
|
||||
.replace(/\s+/g, "-")}`;
|
||||
|
||||
const description =
|
||||
typeof frontmatter.description === "string"
|
||||
? frontmatter.description
|
||||
: `External agent from ${file.source}: ${nameWithoutExt}`;
|
||||
|
||||
const tools = Array.isArray(frontmatter.tools)
|
||||
? (frontmatter.tools as string[])
|
||||
: [];
|
||||
|
||||
return {
|
||||
id,
|
||||
description,
|
||||
tools,
|
||||
body,
|
||||
source: file.source,
|
||||
filePath: file.absolutePath,
|
||||
};
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Conversion
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Convert a parsed external agent to a SkillDefinition
|
||||
* so it can be used uniformly in the skill registry.
|
||||
*/
|
||||
const toSkillDefinition = (agent: ParsedExternalAgent): SkillDefinition => {
|
||||
// Derive a human-readable name from the ID
|
||||
const name = agent.id
|
||||
.replace(/^ext-[a-z]+-/, "")
|
||||
.split("-")
|
||||
.map((w) => w.charAt(0).toUpperCase() + w.slice(1))
|
||||
.join(" ");
|
||||
|
||||
// Extract triggers from the agent body (look for trigger patterns)
|
||||
const triggers: string[] = [`/${agent.id}`];
|
||||
|
||||
return {
|
||||
id: agent.id,
|
||||
name,
|
||||
description: agent.description,
|
||||
version: SKILL_DEFAULTS.VERSION,
|
||||
triggers,
|
||||
triggerType: "explicit",
|
||||
autoTrigger: false,
|
||||
requiredTools: agent.tools,
|
||||
tags: [agent.source, "external"],
|
||||
source: agent.source,
|
||||
systemPrompt: "",
|
||||
instructions: agent.body,
|
||||
loadedAt: Date.now(),
|
||||
};
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Public API
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Source-to-directory mapping
|
||||
*/
|
||||
const SOURCE_DIRS: ReadonlyArray<readonly [string, SkillSource]> = [
  // [directory name under the project root, skill-source tag]
  [EXTERNAL_AGENT_DIRS.CLAUDE, "external-claude"],
  [EXTERNAL_AGENT_DIRS.GITHUB, "external-github"],
  [EXTERNAL_AGENT_DIRS.CODETYPER, "external-codetyper"],
];
|
||||
|
||||
/**
|
||||
* Load all external agents from recognized directories
|
||||
* in the current project.
|
||||
*/
|
||||
export const loadExternalAgents = async (
|
||||
projectRoot?: string,
|
||||
): Promise<SkillDefinition[]> => {
|
||||
const root = projectRoot ?? process.cwd();
|
||||
const allAgents: SkillDefinition[] = [];
|
||||
|
||||
for (const [dirName, source] of SOURCE_DIRS) {
|
||||
const dir = join(root, dirName);
|
||||
const files = await scanDirectory(dir, source);
|
||||
|
||||
for (const file of files) {
|
||||
try {
|
||||
const parsed = parseAgentFile(file);
|
||||
const skill = toSkillDefinition(parsed);
|
||||
allAgents.push(skill);
|
||||
} catch {
|
||||
// Skip unparseable files
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allAgents;
|
||||
};
|
||||
|
||||
/**
|
||||
* Load a specific external agent by source and filename
|
||||
*/
|
||||
export const loadExternalAgentByPath = async (
|
||||
filePath: string,
|
||||
source: SkillSource,
|
||||
): Promise<SkillDefinition | null> => {
|
||||
try {
|
||||
const content = await fs.readFile(filePath, "utf-8");
|
||||
const file: ExternalAgentFile = {
|
||||
relativePath: basename(filePath),
|
||||
absolutePath: filePath,
|
||||
source,
|
||||
content,
|
||||
};
|
||||
const parsed = parseAgentFile(file);
|
||||
return toSkillDefinition(parsed);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if any external agent directories exist
|
||||
*/
|
||||
export const hasExternalAgents = async (
|
||||
projectRoot?: string,
|
||||
): Promise<boolean> => {
|
||||
const root = projectRoot ?? process.cwd();
|
||||
|
||||
for (const [dirName] of SOURCE_DIRS) {
|
||||
try {
|
||||
await fs.access(join(root, dirName));
|
||||
return true;
|
||||
} catch {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
353
src/services/keybind-resolver.ts
Normal file
353
src/services/keybind-resolver.ts
Normal file
@@ -0,0 +1,353 @@
|
||||
/**
|
||||
* Keybind Resolver
|
||||
*
|
||||
* Parses keybind strings (e.g., "ctrl+c", "<leader>m", "shift+return,ctrl+return"),
|
||||
* expands leader-key prefixes, and matches incoming key events against configured bindings.
|
||||
*
|
||||
* Keybind string format:
|
||||
* - "ctrl+c" → single combo
|
||||
* - "ctrl+c,ctrl+d" → two alternatives (either triggers)
|
||||
* - "<leader>m" → leader prefix + key (expands based on configured leader)
|
||||
* - "none" → binding disabled
|
||||
* - "escape" → single key without modifiers
|
||||
*/
|
||||
|
||||
import fs from "fs/promises";
|
||||
import { FILES } from "@constants/paths";
|
||||
import {
|
||||
DEFAULT_KEYBINDS,
|
||||
DEFAULT_LEADER,
|
||||
type KeybindAction,
|
||||
} from "@constants/keybinds";
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
/** A single parsed key combination */
export interface ParsedCombo {
  /** Lowercased key name (e.g. "c", "return", "escape"); "" when none was parsed */
  key: string;
  /** Whether the Ctrl modifier is required */
  ctrl: boolean;
  /** Whether the Alt/Option modifier is required */
  alt: boolean;
  /** Whether the Shift modifier is required */
  shift: boolean;
  /** Whether the Meta (Cmd/Super/Win) modifier is required */
  meta: boolean;
}

/** A resolved keybinding: one action → one or more alternative combos */
export interface ResolvedKeybind {
  /** The action this binding triggers */
  action: KeybindAction;
  /** Alternative combos; empty when the binding is disabled ("none") */
  combos: ParsedCombo[];
  /** The original keybind string as configured */
  raw: string;
}

/** The incoming key event from the TUI framework */
export interface KeyEvent {
  /** Key name as reported by the framework (matched case-insensitively) */
  name: string;
  /** Modifier flags; an absent flag is treated as false when matching */
  ctrl?: boolean;
  alt?: boolean;
  shift?: boolean;
  meta?: boolean;
}

/** User-provided overrides (partial, only the actions they want to change) */
export type KeybindOverrides = Partial<Record<KeybindAction, string>>;

/** Full resolved keybind map */
export type ResolvedKeybindMap = Map<KeybindAction, ResolvedKeybind>;
|
||||
|
||||
// ============================================================================
|
||||
// Parsing
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Expand `<leader>` references in a keybind string.
|
||||
* E.g., with leader="ctrl+x":
|
||||
* "<leader>m" → "ctrl+x+m"
|
||||
* "<leader>q" → "ctrl+x+q"
|
||||
*/
|
||||
const expandLeader = (raw: string, leader: string): string => {
|
||||
return raw.replace(/<leader>/gi, `${leader}+`);
|
||||
};
|
||||
|
||||
/**
|
||||
* Parse a single key combo string like "ctrl+shift+s" into a ParsedCombo.
|
||||
*/
|
||||
const parseCombo = (combo: string): ParsedCombo => {
|
||||
const parts = combo
|
||||
.trim()
|
||||
.toLowerCase()
|
||||
.split("+")
|
||||
.map((p) => p.trim())
|
||||
.filter(Boolean);
|
||||
|
||||
const result: ParsedCombo = {
|
||||
key: "",
|
||||
ctrl: false,
|
||||
alt: false,
|
||||
shift: false,
|
||||
meta: false,
|
||||
};
|
||||
|
||||
for (const part of parts) {
|
||||
switch (part) {
|
||||
case "ctrl":
|
||||
case "control":
|
||||
result.ctrl = true;
|
||||
break;
|
||||
case "alt":
|
||||
case "option":
|
||||
result.alt = true;
|
||||
break;
|
||||
case "shift":
|
||||
result.shift = true;
|
||||
break;
|
||||
case "meta":
|
||||
case "cmd":
|
||||
case "super":
|
||||
case "win":
|
||||
result.meta = true;
|
||||
break;
|
||||
default:
|
||||
// Last non-modifier part is the key name
|
||||
result.key = part;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
/**
|
||||
* Parse a full keybind string (possibly comma-separated) into an array of combos.
|
||||
* Returns empty array for "none" (disabled binding).
|
||||
*/
|
||||
const parseKeybindString = (
|
||||
raw: string,
|
||||
leader: string,
|
||||
): ParsedCombo[] => {
|
||||
const trimmed = raw.trim().toLowerCase();
|
||||
if (trimmed === "none" || trimmed === "") return [];
|
||||
|
||||
const expanded = expandLeader(raw, leader);
|
||||
const alternatives = expanded.split(",");
|
||||
|
||||
return alternatives
|
||||
.map((alt) => parseCombo(alt))
|
||||
.filter((combo) => combo.key !== "");
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Matching
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Check if a key event matches a parsed combo.
|
||||
*/
|
||||
const matchesCombo = (event: KeyEvent, combo: ParsedCombo): boolean => {
|
||||
const eventKey = event.name?.toLowerCase() ?? "";
|
||||
if (eventKey !== combo.key) return false;
|
||||
if (!!event.ctrl !== combo.ctrl) return false;
|
||||
if (!!event.alt !== combo.alt) return false;
|
||||
if (!!event.shift !== combo.shift) return false;
|
||||
if (!!event.meta !== combo.meta) return false;
|
||||
return true;
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Resolver State
|
||||
// ============================================================================
|
||||
|
||||
// Module-level resolver state: rebuilt on every (re)initialization.
let resolvedMap: ResolvedKeybindMap = new Map();
// Leader prefix currently in effect.
let currentLeader: string = DEFAULT_LEADER;
// Guards lazy default initialization inside the public accessors.
let initialized = false;
|
||||
|
||||
/**
|
||||
* Build the resolved keybind map from defaults + overrides.
|
||||
*/
|
||||
const buildResolvedMap = (
|
||||
leader: string,
|
||||
overrides: KeybindOverrides,
|
||||
): ResolvedKeybindMap => {
|
||||
const map = new Map<KeybindAction, ResolvedKeybind>();
|
||||
|
||||
const merged = { ...DEFAULT_KEYBINDS, ...overrides };
|
||||
|
||||
for (const [action, raw] of Object.entries(merged)) {
|
||||
const combos = parseKeybindString(raw, leader);
|
||||
map.set(action as KeybindAction, {
|
||||
action: action as KeybindAction,
|
||||
combos,
|
||||
raw,
|
||||
});
|
||||
}
|
||||
|
||||
return map;
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Public API
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Initialize the keybind resolver.
|
||||
* Loads user overrides from keybindings.json if it exists.
|
||||
*/
|
||||
export const initializeKeybinds = async (): Promise<void> => {
|
||||
let overrides: KeybindOverrides = {};
|
||||
let leader = DEFAULT_LEADER;
|
||||
|
||||
try {
|
||||
const data = await fs.readFile(FILES.keybindings, "utf-8");
|
||||
const parsed = JSON.parse(data) as Record<string, unknown>;
|
||||
|
||||
if (typeof parsed.leader === "string") {
|
||||
leader = parsed.leader;
|
||||
}
|
||||
|
||||
// Extract keybind overrides (anything that's not "leader")
|
||||
for (const [key, value] of Object.entries(parsed)) {
|
||||
if (key === "leader") continue;
|
||||
if (typeof value === "string") {
|
||||
overrides[key as KeybindAction] = value;
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// File doesn't exist or is invalid — use defaults only
|
||||
}
|
||||
|
||||
currentLeader = leader;
|
||||
resolvedMap = buildResolvedMap(leader, overrides);
|
||||
initialized = true;
|
||||
};
|
||||
|
||||
/**
|
||||
* Re-initialize with explicit overrides (for programmatic use).
|
||||
*/
|
||||
export const setKeybindOverrides = (
|
||||
overrides: KeybindOverrides,
|
||||
leader?: string,
|
||||
): void => {
|
||||
currentLeader = leader ?? currentLeader;
|
||||
resolvedMap = buildResolvedMap(currentLeader, overrides);
|
||||
initialized = true;
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if a key event matches a specific action.
|
||||
*/
|
||||
export const matchesAction = (
|
||||
event: KeyEvent,
|
||||
action: KeybindAction,
|
||||
): boolean => {
|
||||
if (!initialized) {
|
||||
// Lazy init with defaults if not yet initialized
|
||||
resolvedMap = buildResolvedMap(DEFAULT_LEADER, {});
|
||||
initialized = true;
|
||||
}
|
||||
|
||||
const resolved = resolvedMap.get(action);
|
||||
if (!resolved) return false;
|
||||
|
||||
return resolved.combos.some((combo) => matchesCombo(event, combo));
|
||||
};
|
||||
|
||||
/**
|
||||
* Find which action(s) a key event matches.
|
||||
* Returns all matching actions (there may be overlaps).
|
||||
*/
|
||||
export const findMatchingActions = (event: KeyEvent): KeybindAction[] => {
|
||||
if (!initialized) {
|
||||
resolvedMap = buildResolvedMap(DEFAULT_LEADER, {});
|
||||
initialized = true;
|
||||
}
|
||||
|
||||
const matches: KeybindAction[] = [];
|
||||
|
||||
for (const [action, resolved] of resolvedMap) {
|
||||
if (resolved.combos.some((combo) => matchesCombo(event, combo))) {
|
||||
matches.push(action);
|
||||
}
|
||||
}
|
||||
|
||||
return matches;
|
||||
};
|
||||
|
||||
/**
|
||||
* Get the resolved keybind for an action (for display in help menus).
|
||||
*/
|
||||
export const getKeybindDisplay = (action: KeybindAction): string => {
|
||||
if (!initialized) {
|
||||
resolvedMap = buildResolvedMap(DEFAULT_LEADER, {});
|
||||
initialized = true;
|
||||
}
|
||||
|
||||
const resolved = resolvedMap.get(action);
|
||||
if (!resolved || resolved.combos.length === 0) return "none";
|
||||
|
||||
return resolved.combos
|
||||
.map((combo) => formatCombo(combo))
|
||||
.join(" / ");
|
||||
};
|
||||
|
||||
/**
|
||||
* Format a parsed combo back to a human-readable string.
|
||||
* E.g., { ctrl: true, key: "c" } → "Ctrl+C"
|
||||
*/
|
||||
const formatCombo = (combo: ParsedCombo): string => {
|
||||
const parts: string[] = [];
|
||||
if (combo.ctrl) parts.push("Ctrl");
|
||||
if (combo.alt) parts.push("Alt");
|
||||
if (combo.shift) parts.push("Shift");
|
||||
if (combo.meta) parts.push("Cmd");
|
||||
|
||||
const keyDisplay =
|
||||
combo.key.length === 1
|
||||
? combo.key.toUpperCase()
|
||||
: combo.key === "return"
|
||||
? "Enter"
|
||||
: combo.key === "escape"
|
||||
? "Esc"
|
||||
: combo.key.charAt(0).toUpperCase() + combo.key.slice(1);
|
||||
|
||||
parts.push(keyDisplay);
|
||||
return parts.join("+");
|
||||
};
|
||||
|
||||
/**
|
||||
* Get all resolved keybinds (for help display or debugging).
|
||||
*/
|
||||
export const getAllKeybinds = (): ResolvedKeybind[] => {
|
||||
if (!initialized) {
|
||||
resolvedMap = buildResolvedMap(DEFAULT_LEADER, {});
|
||||
initialized = true;
|
||||
}
|
||||
return Array.from(resolvedMap.values());
|
||||
};
|
||||
|
||||
/**
|
||||
* Get the current leader key string.
|
||||
*/
|
||||
// Returns the leader prefix currently in effect (defaults to DEFAULT_LEADER).
export const getLeader = (): string => currentLeader;
|
||||
|
||||
/**
|
||||
* Save current keybind overrides to keybindings.json.
|
||||
*/
|
||||
export const saveKeybindOverrides = async (
|
||||
overrides: KeybindOverrides,
|
||||
leader?: string,
|
||||
): Promise<void> => {
|
||||
const { mkdir, writeFile } = await import("fs/promises");
|
||||
const { dirname } = await import("path");
|
||||
|
||||
const filepath = FILES.keybindings;
|
||||
await mkdir(dirname(filepath), { recursive: true });
|
||||
|
||||
const data: Record<string, string> = {};
|
||||
if (leader) data.leader = leader;
|
||||
|
||||
for (const [action, value] of Object.entries(overrides)) {
|
||||
data[action] = value;
|
||||
}
|
||||
|
||||
await writeFile(filepath, JSON.stringify(data, null, 2), "utf-8");
|
||||
};
|
||||
@@ -39,6 +39,10 @@ interface JsonRpcResponse {
|
||||
export class MCPClient {
|
||||
private config: MCPServerConfig;
|
||||
private process: ChildProcess | null = null;
|
||||
/** Base URL for http / sse transport */
|
||||
private httpUrl: string | null = null;
|
||||
/** Session URL returned by the server after SSE handshake (if any) */
|
||||
private httpSessionUrl: string | null = null;
|
||||
private state: MCPConnectionState = "disconnected";
|
||||
private tools: MCPToolDefinition[] = [];
|
||||
private resources: MCPResourceDefinition[] = [];
|
||||
@@ -71,6 +75,13 @@ export class MCPClient {
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve effective transport: `type` takes precedence over legacy `transport`
|
||||
*/
|
||||
private get transport(): "stdio" | "sse" | "http" {
|
||||
return this.config.type ?? "stdio";
|
||||
}
|
||||
|
||||
/**
|
||||
* Connect to the MCP server
|
||||
*/
|
||||
@@ -83,12 +94,13 @@ export class MCPClient {
|
||||
this.error = undefined;
|
||||
|
||||
try {
|
||||
if (this.config.transport === "stdio" || !this.config.transport) {
|
||||
const t = this.transport;
|
||||
if (t === "stdio") {
|
||||
await this.connectStdio();
|
||||
} else if (t === "http" || t === "sse") {
|
||||
await this.connectHttp();
|
||||
} else {
|
||||
throw new Error(
|
||||
`Transport type '${this.config.transport}' not yet supported`,
|
||||
);
|
||||
throw new Error(`Transport type '${t}' is not supported`);
|
||||
}
|
||||
|
||||
// Initialize the connection
|
||||
@@ -109,13 +121,17 @@ export class MCPClient {
|
||||
* Connect via stdio transport
|
||||
*/
|
||||
private async connectStdio(): Promise<void> {
|
||||
if (!this.config.command) {
|
||||
throw new Error("Command is required for stdio transport");
|
||||
}
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const env = {
|
||||
...process.env,
|
||||
...this.config.env,
|
||||
};
|
||||
|
||||
this.process = spawn(this.config.command, this.config.args || [], {
|
||||
this.process = spawn(this.config.command!, this.config.args || [], {
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env,
|
||||
});
|
||||
@@ -146,11 +162,38 @@ export class MCPClient {
|
||||
}
|
||||
});
|
||||
|
||||
// Give the process a moment to start
|
||||
// Give the stdio process a moment to start
|
||||
setTimeout(resolve, 100);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Connect via HTTP (Streamable HTTP) transport.
|
||||
* The server URL is used directly for JSON-RPC over HTTP POST.
|
||||
*/
|
||||
private async connectHttp(): Promise<void> {
|
||||
const url = this.config.url;
|
||||
if (!url) {
|
||||
throw new Error("URL is required for http/sse transport");
|
||||
}
|
||||
this.httpUrl = url;
|
||||
|
||||
// Verify the server is reachable with a simple OPTIONS/HEAD check
|
||||
try {
|
||||
const res = await fetch(url, { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ jsonrpc: "2.0", id: 0, method: "ping" }) });
|
||||
// Even a 4xx/5xx means the server is reachable; we'll handle errors in initialize()
|
||||
if (!res.ok && res.status >= 500) {
|
||||
throw new Error(`Server returned ${res.status}: ${res.statusText}`);
|
||||
}
|
||||
} catch (err) {
|
||||
if (err instanceof TypeError) {
|
||||
// Network/fetch error
|
||||
throw new Error(`Cannot reach MCP server at ${url}: ${(err as Error).message}`);
|
||||
}
|
||||
// Other errors (like 400) are OK — the server is reachable
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle incoming data from the server
|
||||
*/
|
||||
@@ -189,11 +232,24 @@ export class MCPClient {
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a JSON-RPC request
|
||||
* Send a JSON-RPC request (dispatches to stdio or http)
|
||||
*/
|
||||
private async sendRequest(
|
||||
method: string,
|
||||
params?: unknown,
|
||||
): Promise<unknown> {
|
||||
if (this.httpUrl) {
|
||||
return this.sendHttpRequest(method, params);
|
||||
}
|
||||
return this.sendStdioRequest(method, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a JSON-RPC request via stdio
|
||||
*/
|
||||
private async sendStdioRequest(
|
||||
method: string,
|
||||
params?: unknown,
|
||||
): Promise<unknown> {
|
||||
if (!this.process?.stdin) {
|
||||
throw new Error("Not connected");
|
||||
@@ -225,6 +281,72 @@ export class MCPClient {
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a JSON-RPC request via HTTP POST
|
||||
*/
|
||||
private async sendHttpRequest(
|
||||
method: string,
|
||||
params?: unknown,
|
||||
): Promise<unknown> {
|
||||
const url = this.httpSessionUrl ?? this.httpUrl!;
|
||||
const id = ++this.requestId;
|
||||
const body: JsonRpcRequest = { jsonrpc: "2.0", id, method, params };
|
||||
|
||||
const res = await fetch(url, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
Accept: "application/json, text/event-stream",
|
||||
},
|
||||
body: JSON.stringify(body),
|
||||
signal: AbortSignal.timeout(30000),
|
||||
});
|
||||
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => "");
|
||||
throw new Error(`MCP HTTP error ${res.status}: ${text || res.statusText}`);
|
||||
}
|
||||
|
||||
// Capture session URL from Mcp-Session header if present
|
||||
const sessionHeader = res.headers.get("mcp-session");
|
||||
if (sessionHeader && !this.httpSessionUrl) {
|
||||
// If it's a full URL use it; otherwise it's a session id
|
||||
this.httpSessionUrl = sessionHeader.startsWith("http")
|
||||
? sessionHeader
|
||||
: this.httpUrl!;
|
||||
}
|
||||
|
||||
const contentType = res.headers.get("content-type") ?? "";
|
||||
|
||||
// Handle SSE responses (text/event-stream) — collect the last JSON-RPC result
|
||||
if (contentType.includes("text/event-stream")) {
|
||||
const text = await res.text();
|
||||
let lastResult: unknown = undefined;
|
||||
for (const line of text.split("\n")) {
|
||||
if (line.startsWith("data: ")) {
|
||||
const json = line.slice(6).trim();
|
||||
if (json && json !== "[DONE]") {
|
||||
try {
|
||||
const parsed = JSON.parse(json) as JsonRpcResponse;
|
||||
if (parsed.error) throw new Error(parsed.error.message);
|
||||
lastResult = parsed.result;
|
||||
} catch {
|
||||
// skip unparseable lines
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return lastResult;
|
||||
}
|
||||
|
||||
// Standard JSON response
|
||||
const json = (await res.json()) as JsonRpcResponse;
|
||||
if (json.error) {
|
||||
throw new Error(json.error.message);
|
||||
}
|
||||
return json.result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the MCP connection
|
||||
*/
|
||||
@@ -242,7 +364,18 @@ export class MCPClient {
|
||||
});
|
||||
|
||||
// Send initialized notification
|
||||
if (this.process?.stdin) {
|
||||
if (this.httpUrl) {
|
||||
// For HTTP transport, send as a JSON-RPC notification (no id)
|
||||
const url = this.httpSessionUrl ?? this.httpUrl;
|
||||
await fetch(url, {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
jsonrpc: "2.0",
|
||||
method: "notifications/initialized",
|
||||
}),
|
||||
}).catch(() => { /* ignore notification failures */ });
|
||||
} else if (this.process?.stdin) {
|
||||
this.process.stdin.write(
|
||||
JSON.stringify({
|
||||
jsonrpc: "2.0",
|
||||
@@ -344,6 +477,8 @@ export class MCPClient {
|
||||
this.process.kill();
|
||||
this.process = null;
|
||||
}
|
||||
this.httpUrl = null;
|
||||
this.httpSessionUrl = null;
|
||||
this.state = "disconnected";
|
||||
this.tools = [];
|
||||
this.resources = [];
|
||||
|
||||
@@ -37,7 +37,7 @@ interface MCPManagerState {
|
||||
*/
|
||||
const state: MCPManagerState = {
|
||||
clients: new Map(),
|
||||
config: { servers: {} },
|
||||
config: { inputs: [], servers: {} },
|
||||
initialized: false,
|
||||
};
|
||||
|
||||
@@ -53,17 +53,49 @@ const loadConfigFile = async (filePath: string): Promise<MCPConfig | null> => {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Inject the runtime `name` field from the config key into each server entry.
|
||||
* Also normalises legacy `transport` field → `type`.
|
||||
*/
|
||||
const hydrateServerNames = (
|
||||
servers: Record<string, MCPServerConfig>,
|
||||
): Record<string, MCPServerConfig> => {
|
||||
const hydrated: Record<string, MCPServerConfig> = {};
|
||||
for (const [key, cfg] of Object.entries(servers)) {
|
||||
// Normalise legacy `transport` → `type`
|
||||
const type = cfg.type ?? (cfg as Record<string, unknown>).transport as MCPServerConfig["type"];
|
||||
hydrated[key] = { ...cfg, name: key, type };
|
||||
}
|
||||
return hydrated;
|
||||
};
|
||||
|
||||
/**
|
||||
* Build a clean server config object for disk persistence.
|
||||
* Strips the runtime-only `name` field so the JSON matches:
|
||||
* { "servers": { "<name>": { "type": "http", "url": "..." } } }
|
||||
*/
|
||||
const toStorableConfig = (config: MCPServerConfig): Omit<MCPServerConfig, "name"> => {
|
||||
const { name: _name, ...rest } = config;
|
||||
// Remove undefined fields to keep JSON clean
|
||||
return Object.fromEntries(
|
||||
Object.entries(rest).filter(([, v]) => v !== undefined),
|
||||
) as Omit<MCPServerConfig, "name">;
|
||||
};
|
||||
|
||||
/**
|
||||
* Load MCP configuration (merges global + local)
|
||||
*/
|
||||
export const loadMCPConfig = async (): Promise<MCPConfig> => {
|
||||
const globalConfig = await loadConfigFile(CONFIG_LOCATIONS.global);
|
||||
const localConfig = await loadConfigFile(CONFIG_LOCATIONS.local);
|
||||
const globalConfig =
|
||||
(await loadConfigFile(CONFIG_LOCATIONS.global)) || { inputs: [], servers: {} };
|
||||
const localConfig =
|
||||
(await loadConfigFile(CONFIG_LOCATIONS.local)) || { inputs: [], servers: {} };
|
||||
|
||||
const merged: MCPConfig = {
|
||||
inputs: [...(globalConfig?.inputs || []), ...(localConfig?.inputs || [])],
|
||||
servers: {
|
||||
...(globalConfig?.servers || {}),
|
||||
...(localConfig?.servers || {}),
|
||||
...hydrateServerNames(globalConfig?.servers || {}),
|
||||
...hydrateServerNames(localConfig?.servers || {}),
|
||||
},
|
||||
};
|
||||
|
||||
@@ -71,7 +103,8 @@ export const loadMCPConfig = async (): Promise<MCPConfig> => {
|
||||
};
|
||||
|
||||
/**
|
||||
* Save MCP configuration
|
||||
* Save MCP configuration.
|
||||
* Strips runtime-only `name` fields from server entries before writing.
|
||||
*/
|
||||
export const saveMCPConfig = async (
|
||||
config: MCPConfig,
|
||||
@@ -80,8 +113,19 @@ export const saveMCPConfig = async (
|
||||
const filePath = global ? CONFIG_LOCATIONS.global : CONFIG_LOCATIONS.local;
|
||||
const dir = path.dirname(filePath);
|
||||
|
||||
// Strip runtime `name` from each server entry before persisting
|
||||
const cleanServers: Record<string, Omit<MCPServerConfig, "name">> = {};
|
||||
for (const [key, srv] of Object.entries(config.servers)) {
|
||||
cleanServers[key] = toStorableConfig(srv);
|
||||
}
|
||||
|
||||
const output: MCPConfig = {
|
||||
inputs: config.inputs ?? [],
|
||||
servers: cleanServers as Record<string, MCPServerConfig>,
|
||||
};
|
||||
|
||||
await fs.mkdir(dir, { recursive: true });
|
||||
await fs.writeFile(filePath, JSON.stringify(config, null, 2), "utf-8");
|
||||
await fs.writeFile(filePath, JSON.stringify(output, null, 2), "utf-8");
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -250,14 +294,24 @@ export const addServer = async (
|
||||
await initializeMCP();
|
||||
|
||||
const targetConfig = global
|
||||
? (await loadConfigFile(CONFIG_LOCATIONS.global)) || { servers: {} }
|
||||
: (await loadConfigFile(CONFIG_LOCATIONS.local)) || { servers: {} };
|
||||
? (await loadConfigFile(CONFIG_LOCATIONS.global)) || { inputs: [], servers: {} }
|
||||
: (await loadConfigFile(CONFIG_LOCATIONS.local)) || { inputs: [], servers: {} };
|
||||
|
||||
targetConfig.servers[name] = { ...config, name };
|
||||
if (targetConfig.servers[name]) {
|
||||
throw new Error(`Server '${name}' already exists`);
|
||||
}
|
||||
|
||||
// Also check in-memory merged config for duplicates across scopes
|
||||
if (state.config.servers[name]) {
|
||||
throw new Error(`Server '${name}' already exists`);
|
||||
}
|
||||
|
||||
// Store without the `name` field — the key is the name
|
||||
targetConfig.servers[name] = toStorableConfig(config as MCPServerConfig);
|
||||
|
||||
await saveMCPConfig(targetConfig, global);
|
||||
|
||||
// Update in-memory config
|
||||
// Update in-memory config with runtime name injected
|
||||
state.config.servers[name] = { ...config, name };
|
||||
};
|
||||
|
||||
@@ -275,6 +329,7 @@ export const removeServer = async (
|
||||
|
||||
if (config?.servers[name]) {
|
||||
delete config.servers[name];
|
||||
config.inputs = config.inputs || [];
|
||||
await saveMCPConfig(config, global);
|
||||
}
|
||||
|
||||
|
||||
@@ -309,7 +309,7 @@ export const isServerInstalled = (serverId: string): boolean => {
|
||||
return Array.from(instances.values()).some(
|
||||
(instance) =>
|
||||
instance.config.name === serverId ||
|
||||
instance.config.name.toLowerCase() === serverId.toLowerCase(),
|
||||
(instance.config.name ?? "").toLowerCase() === serverId.toLowerCase(),
|
||||
);
|
||||
};
|
||||
|
||||
@@ -338,16 +338,22 @@ export const installServer = async (
|
||||
|
||||
try {
|
||||
// Add server to configuration
|
||||
await addServer(
|
||||
server.id,
|
||||
{
|
||||
command: server.command,
|
||||
args: customArgs || server.args,
|
||||
transport: server.transport,
|
||||
enabled: true,
|
||||
},
|
||||
global,
|
||||
);
|
||||
const serverType = server.transport ?? "stdio";
|
||||
const config: Omit<import("@/types/mcp").MCPServerConfig, "name"> =
|
||||
serverType === "stdio"
|
||||
? {
|
||||
type: "stdio",
|
||||
command: server.command,
|
||||
args: customArgs || server.args,
|
||||
enabled: true,
|
||||
}
|
||||
: {
|
||||
type: serverType,
|
||||
url: server.url,
|
||||
enabled: true,
|
||||
};
|
||||
|
||||
await addServer(server.id, config, global);
|
||||
|
||||
let connected = false;
|
||||
|
||||
|
||||
@@ -50,26 +50,59 @@ export const matchesBashPattern = (
|
||||
return cmdArgs === patternArgs;
|
||||
};
|
||||
|
||||
// =============================================================================
|
||||
// Command Chaining
|
||||
// =============================================================================
|
||||
|
||||
/**
|
||||
* Split a shell command on chaining operators (&&, ||, ;, |).
|
||||
* Respects quoted strings. Prevents pattern bypass via
|
||||
* "cd /safe && rm -rf /dangerous".
|
||||
*/
|
||||
const splitChainedCommands = (command: string): string[] => {
|
||||
const parts: string[] = [];
|
||||
let current = "";
|
||||
let inSingle = false;
|
||||
let inDouble = false;
|
||||
|
||||
for (let i = 0; i < command.length; i++) {
|
||||
const ch = command[i];
|
||||
const next = command[i + 1];
|
||||
|
||||
if (ch === "'" && !inDouble) { inSingle = !inSingle; current += ch; continue; }
|
||||
if (ch === '"' && !inSingle) { inDouble = !inDouble; current += ch; continue; }
|
||||
if (inSingle || inDouble) { current += ch; continue; }
|
||||
|
||||
if (ch === "&" && next === "&") { parts.push(current); current = ""; i++; continue; }
|
||||
if (ch === "|" && next === "|") { parts.push(current); current = ""; i++; continue; }
|
||||
if (ch === ";") { parts.push(current); current = ""; continue; }
|
||||
if (ch === "|") { parts.push(current); current = ""; continue; }
|
||||
|
||||
current += ch;
|
||||
}
|
||||
|
||||
if (current.trim()) parts.push(current);
|
||||
return parts.map((p) => p.trim()).filter(Boolean);
|
||||
};
|
||||
|
||||
// =============================================================================
|
||||
// Index-Based Matching
|
||||
// =============================================================================
|
||||
|
||||
/**
|
||||
* Check if a command is allowed by any pattern in the index
|
||||
* Check if a command is allowed by any pattern in the index.
|
||||
* For chained commands (&&, ||, ;, |), EVERY sub-command must be allowed.
|
||||
*/
|
||||
export const isBashAllowedByIndex = (
|
||||
command: string,
|
||||
index: PatternIndex,
|
||||
): boolean => {
|
||||
const subCommands = splitChainedCommands(command);
|
||||
const bashPatterns = getPatternsForTool(index, "Bash");
|
||||
|
||||
for (const entry of bashPatterns) {
|
||||
if (matchesBashPattern(command, entry.parsed)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
return subCommands.every((subCmd) =>
|
||||
bashPatterns.some((entry) => matchesBashPattern(subCmd, entry.parsed)),
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -405,67 +405,77 @@ export const getActivePlans = (): ImplementationPlan[] => {
|
||||
};
|
||||
|
||||
/**
|
||||
* Format a plan for display
|
||||
* Risk level display icons
|
||||
*/
|
||||
const RISK_ICONS: Record<string, string> = {
|
||||
high: "!",
|
||||
medium: "~",
|
||||
low: " ",
|
||||
};
|
||||
|
||||
/**
|
||||
* Format a plan for display (terminal-friendly, no markdown)
|
||||
*/
|
||||
export const formatPlanForDisplay = (plan: ImplementationPlan): string => {
|
||||
const lines: string[] = [];
|
||||
|
||||
lines.push(`# Implementation Plan: ${plan.title}`);
|
||||
lines.push(`Plan to implement`);
|
||||
lines.push("");
|
||||
lines.push(plan.title);
|
||||
lines.push("");
|
||||
lines.push(`## Summary`);
|
||||
lines.push(plan.summary);
|
||||
lines.push("");
|
||||
|
||||
if (plan.context.filesAnalyzed.length > 0) {
|
||||
lines.push(`## Files Analyzed`);
|
||||
plan.context.filesAnalyzed.forEach(f => lines.push(`- ${f}`));
|
||||
lines.push("Files Analyzed");
|
||||
plan.context.filesAnalyzed.forEach(f => lines.push(` ${f}`));
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
if (plan.context.currentArchitecture) {
|
||||
lines.push(`## Current Architecture`);
|
||||
lines.push(plan.context.currentArchitecture);
|
||||
lines.push("Current Architecture");
|
||||
lines.push(` ${plan.context.currentArchitecture}`);
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
lines.push(`## Implementation Steps`);
|
||||
plan.steps.forEach((step, i) => {
|
||||
const riskIcon = step.riskLevel === "high" ? "⚠️" : step.riskLevel === "medium" ? "⚡" : "✓";
|
||||
lines.push(`${i + 1}. ${riskIcon} **${step.title}**`);
|
||||
lines.push(` ${step.description}`);
|
||||
if (step.filesAffected.length > 0) {
|
||||
lines.push(` Files: ${step.filesAffected.join(", ")}`);
|
||||
}
|
||||
});
|
||||
lines.push("");
|
||||
|
||||
if (plan.risks.length > 0) {
|
||||
lines.push(`## Risks`);
|
||||
plan.risks.forEach(risk => {
|
||||
lines.push(`- **${risk.impact.toUpperCase()}**: ${risk.description}`);
|
||||
lines.push(` Mitigation: ${risk.mitigation}`);
|
||||
if (plan.steps.length > 0) {
|
||||
lines.push("Implementation Steps");
|
||||
plan.steps.forEach((step, i) => {
|
||||
const icon = RISK_ICONS[step.riskLevel] ?? " ";
|
||||
lines.push(` ${i + 1}. [${icon}] ${step.title}`);
|
||||
lines.push(` ${step.description}`);
|
||||
if (step.filesAffected.length > 0) {
|
||||
lines.push(` Files: ${step.filesAffected.join(", ")}`);
|
||||
}
|
||||
});
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
lines.push(`## Testing Strategy`);
|
||||
lines.push(plan.testingStrategy || "TBD");
|
||||
lines.push("");
|
||||
if (plan.risks.length > 0) {
|
||||
lines.push("Risks");
|
||||
plan.risks.forEach(risk => {
|
||||
lines.push(` [${risk.impact.toUpperCase()}] ${risk.description}`);
|
||||
lines.push(` Mitigation: ${risk.mitigation}`);
|
||||
});
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
lines.push(`## Rollback Plan`);
|
||||
lines.push(plan.rollbackPlan || "TBD");
|
||||
lines.push("");
|
||||
if (plan.testingStrategy) {
|
||||
lines.push("Testing Strategy");
|
||||
lines.push(` ${plan.testingStrategy}`);
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
lines.push(`## Estimated Changes`);
|
||||
lines.push(`- Files to create: ${plan.estimatedChanges.filesCreated}`);
|
||||
lines.push(`- Files to modify: ${plan.estimatedChanges.filesModified}`);
|
||||
lines.push(`- Files to delete: ${plan.estimatedChanges.filesDeleted}`);
|
||||
lines.push("");
|
||||
if (plan.rollbackPlan) {
|
||||
lines.push("Rollback Plan");
|
||||
lines.push(` ${plan.rollbackPlan}`);
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
lines.push("---");
|
||||
lines.push("**Awaiting approval to proceed with implementation.**");
|
||||
lines.push("Reply with 'proceed', 'approve', or 'go ahead' to start execution.");
|
||||
lines.push("Reply with 'stop', 'cancel', or provide feedback to modify the plan.");
|
||||
lines.push("Estimated Changes");
|
||||
lines.push(` Files to create: ${plan.estimatedChanges.filesCreated}`);
|
||||
lines.push(` Files to modify: ${plan.estimatedChanges.filesModified}`);
|
||||
lines.push(` Files to delete: ${plan.estimatedChanges.filesDeleted}`);
|
||||
|
||||
return lines.join("\n");
|
||||
};
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
import { v4 as uuidv4 } from "uuid";
|
||||
import type { Message } from "@/types/providers";
|
||||
import { getMessageText } from "@/types/providers";
|
||||
import type { AgentOptions } from "@interfaces/AgentOptions";
|
||||
import type { AgentResult } from "@interfaces/AgentResult";
|
||||
import type {
|
||||
@@ -245,13 +246,13 @@ const convertToCompressibleMessages = (
|
||||
|
||||
if ("tool_calls" in msg) {
|
||||
role = "assistant";
|
||||
content = msg.content || JSON.stringify(msg.tool_calls);
|
||||
content = (typeof msg.content === "string" ? msg.content : getMessageText(msg.content ?? "")) || JSON.stringify(msg.tool_calls);
|
||||
} else if ("tool_call_id" in msg) {
|
||||
role = "tool";
|
||||
content = msg.content;
|
||||
content = typeof msg.content === "string" ? msg.content : getMessageText(msg.content);
|
||||
} else {
|
||||
role = msg.role as "user" | "assistant" | "system";
|
||||
content = msg.content;
|
||||
content = typeof msg.content === "string" ? msg.content : getMessageText(msg.content);
|
||||
}
|
||||
|
||||
return {
|
||||
@@ -322,7 +323,8 @@ export const runReasoningAgentLoop = async (
|
||||
await refreshMCPTools();
|
||||
|
||||
let agentMessages: AgentMessage[] = [...messages];
|
||||
const originalQuery = messages.find((m) => m.role === "user")?.content || "";
|
||||
const originalQueryContent = messages.find((m) => m.role === "user")?.content;
|
||||
const originalQuery = originalQueryContent ? getMessageText(originalQueryContent) : "";
|
||||
const previousAttempts: AttemptRecord[] = [];
|
||||
|
||||
while (iterations < maxIterations) {
|
||||
|
||||
244
src/services/skill-detector.ts
Normal file
244
src/services/skill-detector.ts
Normal file
@@ -0,0 +1,244 @@
|
||||
/**
|
||||
* Skill Auto-Detector
|
||||
*
|
||||
* Analyzes user prompts to automatically detect and activate
|
||||
* relevant skills based on keywords, file extensions, and context.
|
||||
* Skills are selected AFTER plans are approved and before agent execution.
|
||||
*/
|
||||
|
||||
import {
|
||||
SKILL_DETECTION_KEYWORDS,
|
||||
SKILL_AUTO_DETECT_THRESHOLD,
|
||||
SKILL_AUTO_DETECT_MAX,
|
||||
} from "@constants/skills";
|
||||
import type { SkillDefinition, AutoDetectedSkill } from "@/types/skills";
|
||||
|
||||
// ============================================================================
|
||||
// Keyword Matching
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Score a prompt against the keyword detection table.
|
||||
* Returns a map of skillId → { totalScore, matchedKeywords, category }.
|
||||
*/
|
||||
const scorePromptKeywords = (
|
||||
prompt: string,
|
||||
): Map<
|
||||
string,
|
||||
{ totalScore: number; matchedKeywords: string[]; category: string }
|
||||
> => {
|
||||
const lower = prompt.toLowerCase();
|
||||
const scores = new Map<
|
||||
string,
|
||||
{ totalScore: number; matchedKeywords: string[]; category: string }
|
||||
>();
|
||||
|
||||
for (const [keyword, skillId, category, weight] of SKILL_DETECTION_KEYWORDS) {
|
||||
const keyLower = keyword.toLowerCase();
|
||||
|
||||
// Check for whole-word or phrase match
|
||||
const hasMatch = matchKeyword(lower, keyLower);
|
||||
if (!hasMatch) continue;
|
||||
|
||||
const existing = scores.get(skillId);
|
||||
if (existing) {
|
||||
existing.totalScore = Math.min(1, existing.totalScore + weight * 0.3);
|
||||
existing.matchedKeywords.push(keyword);
|
||||
} else {
|
||||
scores.set(skillId, {
|
||||
totalScore: weight,
|
||||
matchedKeywords: [keyword],
|
||||
category,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return scores;
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if a keyword appears in text (word-boundary aware)
|
||||
*/
|
||||
const matchKeyword = (text: string, keyword: string): boolean => {
|
||||
// For short keywords (1-3 chars), require word boundaries
|
||||
if (keyword.length <= 3) {
|
||||
const regex = new RegExp(`\\b${escapeRegex(keyword)}\\b`, "i");
|
||||
return regex.test(text);
|
||||
}
|
||||
|
||||
// For file extensions, match exactly
|
||||
if (keyword.startsWith(".")) {
|
||||
return text.includes(keyword);
|
||||
}
|
||||
|
||||
// For longer keywords/phrases, simple includes is fine
|
||||
return text.includes(keyword);
|
||||
};
|
||||
|
||||
/**
|
||||
* Escape special regex characters
|
||||
*/
|
||||
const escapeRegex = (str: string): string => {
|
||||
return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Context Analysis
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Analyze file references in the prompt for additional skill signals
|
||||
*/
|
||||
const analyzeFileReferences = (
|
||||
prompt: string,
|
||||
): Map<string, number> => {
|
||||
const signals = new Map<string, number>();
|
||||
|
||||
// TypeScript/JavaScript files
|
||||
if (/\.(ts|tsx)\b/.test(prompt)) {
|
||||
signals.set("typescript", (signals.get("typescript") ?? 0) + 0.3);
|
||||
}
|
||||
if (/\.(jsx)\b/.test(prompt)) {
|
||||
signals.set("react", (signals.get("react") ?? 0) + 0.4);
|
||||
}
|
||||
|
||||
// Style files
|
||||
if (/\.(css|scss|sass|less)\b/.test(prompt)) {
|
||||
signals.set("css-scss", (signals.get("css-scss") ?? 0) + 0.4);
|
||||
}
|
||||
|
||||
// Config files
|
||||
if (/docker(file|-compose)|\.dockerfile/i.test(prompt)) {
|
||||
signals.set("devops", (signals.get("devops") ?? 0) + 0.5);
|
||||
}
|
||||
if (/\.github\/workflows/i.test(prompt)) {
|
||||
signals.set("devops", (signals.get("devops") ?? 0) + 0.5);
|
||||
}
|
||||
|
||||
// Test files
|
||||
if (/\.(test|spec)\.(ts|tsx|js|jsx)\b/.test(prompt)) {
|
||||
signals.set("testing", (signals.get("testing") ?? 0) + 0.5);
|
||||
}
|
||||
|
||||
// Database-related files
|
||||
if (/\.(sql|prisma)\b/.test(prompt) || /migration/i.test(prompt)) {
|
||||
signals.set("database", (signals.get("database") ?? 0) + 0.4);
|
||||
}
|
||||
|
||||
return signals;
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Public API
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Detect which skills should be activated for a given user prompt.
|
||||
* Returns up to SKILL_AUTO_DETECT_MAX skills sorted by confidence.
|
||||
*
|
||||
* @param prompt - The user's message
|
||||
* @param availableSkills - All registered skills to match against
|
||||
* @returns Detected skills with confidence scores
|
||||
*/
|
||||
export const detectSkillsForPrompt = (
|
||||
prompt: string,
|
||||
availableSkills: SkillDefinition[],
|
||||
): AutoDetectedSkill[] => {
|
||||
// Step 1: Score keywords
|
||||
const keywordScores = scorePromptKeywords(prompt);
|
||||
|
||||
// Step 2: Analyze file references for bonus signals
|
||||
const fileSignals = analyzeFileReferences(prompt);
|
||||
|
||||
// Step 3: Merge file signals into keyword scores
|
||||
for (const [skillId, bonus] of fileSignals) {
|
||||
const existing = keywordScores.get(skillId);
|
||||
if (existing) {
|
||||
existing.totalScore = Math.min(1, existing.totalScore + bonus);
|
||||
} else {
|
||||
keywordScores.set(skillId, {
|
||||
totalScore: bonus,
|
||||
matchedKeywords: [`(file pattern)`],
|
||||
category: "file",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Step 4: Match against available skills and filter by threshold
|
||||
const detected: AutoDetectedSkill[] = [];
|
||||
|
||||
for (const [skillId, score] of keywordScores) {
|
||||
if (score.totalScore < SKILL_AUTO_DETECT_THRESHOLD) continue;
|
||||
|
||||
// Find the matching skill definition
|
||||
const skill = availableSkills.find(
|
||||
(s) => s.id === skillId && s.autoTrigger !== false,
|
||||
);
|
||||
if (!skill) continue;
|
||||
|
||||
detected.push({
|
||||
skill,
|
||||
confidence: Math.min(1, score.totalScore),
|
||||
matchedKeywords: score.matchedKeywords,
|
||||
category: score.category,
|
||||
});
|
||||
}
|
||||
|
||||
// Step 5: Sort by confidence and limit
|
||||
detected.sort((a, b) => b.confidence - a.confidence);
|
||||
return detected.slice(0, SKILL_AUTO_DETECT_MAX);
|
||||
};
|
||||
|
||||
/**
|
||||
* Build a skill injection prompt from detected skills.
|
||||
* This is appended to the system prompt to give the agent
|
||||
* specialized knowledge for the current task.
|
||||
*/
|
||||
export const buildSkillInjection = (
|
||||
detectedSkills: AutoDetectedSkill[],
|
||||
): string => {
|
||||
if (detectedSkills.length === 0) return "";
|
||||
|
||||
const parts: string[] = [
|
||||
"# Activated Skills",
|
||||
"",
|
||||
"The following specialized skills have been activated for this task. " +
|
||||
"Use their guidelines and best practices when applicable:",
|
||||
"",
|
||||
];
|
||||
|
||||
for (const { skill, confidence, matchedKeywords } of detectedSkills) {
|
||||
parts.push(`## Skill: ${skill.name} (confidence: ${(confidence * 100).toFixed(0)}%)`);
|
||||
parts.push(`Matched: ${matchedKeywords.join(", ")}`);
|
||||
parts.push("");
|
||||
|
||||
if (skill.systemPrompt) {
|
||||
parts.push(skill.systemPrompt);
|
||||
parts.push("");
|
||||
}
|
||||
|
||||
if (skill.instructions) {
|
||||
parts.push(skill.instructions);
|
||||
parts.push("");
|
||||
}
|
||||
|
||||
parts.push("---");
|
||||
parts.push("");
|
||||
}
|
||||
|
||||
return parts.join("\n");
|
||||
};
|
||||
|
||||
/**
|
||||
* Format detected skills for logging/display
|
||||
*/
|
||||
export const formatDetectedSkills = (
|
||||
detectedSkills: AutoDetectedSkill[],
|
||||
): string => {
|
||||
if (detectedSkills.length === 0) return "No skills auto-detected.";
|
||||
|
||||
const names = detectedSkills.map(
|
||||
(d) => `${d.skill.name} (${(d.confidence * 100).toFixed(0)}%)`,
|
||||
);
|
||||
return `Skills activated: ${names.join(", ")}`;
|
||||
};
|
||||
@@ -3,16 +3,24 @@
|
||||
*
|
||||
* Manages skill registration, matching, and invocation.
|
||||
* Uses progressive disclosure to load skills on demand.
|
||||
* Merges built-in skills with external agents from .claude/, .github/, .codetyper/.
|
||||
*/
|
||||
|
||||
import { SKILL_MATCHING, SKILL_LOADING, SKILL_ERRORS } from "@constants/skills";
|
||||
import { loadAllSkills, loadSkillById } from "@services/skill-loader";
|
||||
import { loadExternalAgents } from "@services/external-agent-loader";
|
||||
import {
|
||||
detectSkillsForPrompt,
|
||||
buildSkillInjection,
|
||||
formatDetectedSkills,
|
||||
} from "@services/skill-detector";
|
||||
import type {
|
||||
SkillDefinition,
|
||||
SkillMatch,
|
||||
SkillContext,
|
||||
SkillExecutionResult,
|
||||
SkillRegistryState,
|
||||
AutoDetectedSkill,
|
||||
} from "@/types/skills";
|
||||
|
||||
// ============================================================================
|
||||
@@ -21,6 +29,7 @@ import type {
|
||||
|
||||
let registryState: SkillRegistryState = {
|
||||
skills: new Map(),
|
||||
externalAgents: new Map(),
|
||||
lastLoadedAt: null,
|
||||
loadErrors: [],
|
||||
};
|
||||
@@ -30,6 +39,7 @@ let registryState: SkillRegistryState = {
|
||||
*/
|
||||
export const getRegistryState = (): SkillRegistryState => ({
|
||||
skills: new Map(registryState.skills),
|
||||
externalAgents: new Map(registryState.externalAgents),
|
||||
lastLoadedAt: registryState.lastLoadedAt,
|
||||
loadErrors: [...registryState.loadErrors],
|
||||
});
|
||||
@@ -48,17 +58,33 @@ const isCacheStale = (): boolean => {
|
||||
|
||||
/**
|
||||
* Initialize skill registry with all available skills
|
||||
* (built-in + user + project + external agents)
|
||||
*/
|
||||
export const initializeRegistry = async (): Promise<void> => {
|
||||
try {
|
||||
const skills = await loadAllSkills("metadata");
|
||||
// Load built-in and user/project skills
|
||||
const skills = await loadAllSkills("full");
|
||||
registryState.skills.clear();
|
||||
registryState.externalAgents.clear();
|
||||
registryState.loadErrors = [];
|
||||
|
||||
for (const skill of skills) {
|
||||
registryState.skills.set(skill.id, skill);
|
||||
}
|
||||
|
||||
// Load external agents from .claude/, .github/, .codetyper/
|
||||
try {
|
||||
const externalAgents = await loadExternalAgents();
|
||||
for (const agent of externalAgents) {
|
||||
registryState.externalAgents.set(agent.id, agent);
|
||||
// Also register external agents as regular skills for unified matching
|
||||
registryState.skills.set(agent.id, agent);
|
||||
}
|
||||
} catch (error) {
|
||||
const msg = error instanceof Error ? error.message : String(error);
|
||||
registryState.loadErrors.push(`External agents: ${msg}`);
|
||||
}
|
||||
|
||||
registryState.lastLoadedAt = Date.now();
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
@@ -362,6 +388,67 @@ export const executeFromInput = async (
|
||||
});
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Auto-Detection
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Auto-detect skills relevant to a user prompt.
|
||||
* Analyzes the prompt content and returns matching skills
|
||||
* sorted by confidence.
|
||||
*/
|
||||
export const autoDetectSkills = async (
|
||||
prompt: string,
|
||||
): Promise<AutoDetectedSkill[]> => {
|
||||
await refreshIfNeeded();
|
||||
const allSkills = getAllSkills();
|
||||
return detectSkillsForPrompt(prompt, allSkills);
|
||||
};
|
||||
|
||||
/**
|
||||
* Build a skill injection prompt for detected skills.
|
||||
* This should be appended to the system prompt or inserted
|
||||
* as a system message before the agent processes the prompt.
|
||||
*/
|
||||
export const buildSkillInjectionForPrompt = async (
|
||||
prompt: string,
|
||||
): Promise<{ injection: string; detected: AutoDetectedSkill[] }> => {
|
||||
const detected = await autoDetectSkills(prompt);
|
||||
const injection = buildSkillInjection(detected);
|
||||
return { injection, detected };
|
||||
};
|
||||
|
||||
/**
|
||||
* Get a human-readable summary of detected skills for logging
|
||||
*/
|
||||
export const getDetectedSkillsSummary = (
|
||||
detected: AutoDetectedSkill[],
|
||||
): string => {
|
||||
return formatDetectedSkills(detected);
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// External Agent Access
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Get all loaded external agents
|
||||
*/
|
||||
export const getExternalAgents = (): SkillDefinition[] => {
|
||||
return Array.from(registryState.externalAgents.values());
|
||||
};
|
||||
|
||||
/**
|
||||
* Get external agents by source
|
||||
*/
|
||||
export const getExternalAgentsBySource = (
|
||||
source: string,
|
||||
): SkillDefinition[] => {
|
||||
return Array.from(registryState.externalAgents.values()).filter(
|
||||
(agent) => agent.source === source,
|
||||
);
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Utility Functions
|
||||
// ============================================================================
|
||||
|
||||
Reference in New Issue
Block a user