Add BRAIN_DISABLED flag and fix Ollama tool call formatting

Features:
  - Add BRAIN_DISABLED feature flag to hide all Brain functionality
  - When enabled, hides Brain banner, status indicator, menu, and commands
  - Flag location: src/constants/brain.ts

  Fixes:
  - Fix Ollama 400 error by properly formatting tool_calls in messages
  - Update OllamaMessage type to include tool_calls field
  - Fix Brain menu keyboard not working (add missing modes to isMenuOpen)

  UI Changes:
  - Remove "^Tab toggle mode" hint from status bar
  - Remove "ctrl+t to hide todos" hint from status bar

  Files modified:
  - src/constants/brain.ts (add BRAIN_DISABLED flag)
  - src/types/ollama.ts (add tool_calls to OllamaMessage)
  - src/providers/ollama/chat.ts (format tool_calls in messages)
  - src/tui-solid/components/header.tsx (hide Brain UI when disabled)
  - src/tui-solid/components/status-bar.tsx (remove hints)
  - src/tui-solid/components/command-menu.tsx (filter brain command)
  - src/tui-solid/components/input-area.tsx (fix isMenuOpen modes)
  - src/tui-solid/routes/session.tsx (skip brain menu when disabled)
  - src/services/brain.ts (early return when disabled)
  - src/services/chat-tui/initialize.ts (skip brain init when disabled)
This commit is contained in:
2026-02-02 13:25:38 -05:00
parent 2eadda584a
commit c839fc4d68
114 changed files with 17243 additions and 273 deletions

View File

@@ -1,8 +1,10 @@
# CodeTyper CLI
An AI-powered terminal coding agent with an interactive TUI. CodeTyper autonomously executes coding tasks using tool calls with granular permission controls and intelligent provider routing.
<p align="center">
<img src="assets/Codetyper_logo.png" alt="CodeTyper Logo" width="400">
</p>
![CodeTyper Welcome Screen](assets/CodetyperLogin.png)
An AI-powered terminal coding agent with an interactive TUI. CodeTyper autonomously executes coding tasks using tool calls with granular permission controls and intelligent provider routing.
## How It Works
@@ -85,7 +87,7 @@ Full-screen terminal interface with real-time streaming responses.
- `Enter` - Send message
- `Shift+Enter` - New line
- `/` - Open command menu
- `Ctrl+Tab` - Toggle interaction mode
- `Ctrl+M` - Toggle interaction mode
- `Ctrl+T` - Toggle todo panel
- `Shift+Up/Down` - Scroll log panel
- `Ctrl+C` (twice) - Exit

BIN
assets/Codetyper_logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 478 KiB

55
assets/ascii-art.txt Normal file
View File

@@ -0,0 +1,55 @@
######### #########
%%%%%%%%%%%%% %%%%%%%%%%%%%
%%%##++++*%%%%%######=######=#%####%%%%%*+++=*#%%%
%%%*=+++++*%%%#######=######=#######%%%++++++=*%%%
%%%*=++++*#%%%%#++##%=#%###%=%##++#%%%%#*++++=*%%%
@%%%#*+*#%%%%%%##+*%%=++##++=%%#=##%%%%%%#*+*#%%%@
@%%%%%%%%%%%%%%#*+#=#+##*#=#*+#%%%%%%%%%%%%%%@
%%%%%%###%%%###+=#*##*#=+#####%#######%%%
%%%%%%%*=*#####**##+##*##**######+*###%%%%
%%%%%%%%%#%%%%%%%#+*+##**+##%%%%%%%%%##%%%%%
%%%%%%%%##%%%%%%#%%+=##+=#%%%%%%%%%%%%%%%%%%
%%%###%%%%#%%@%%%%%%#%%%#%%%%%%%%%@%%%%###%%%%%%
%%%##%%%%##%%%%##%%%%*+++++#%%%%*%%%%###%%%%%%%%
%%%%%%%%%%%%#%%%%%%*=------=+#%%%%%%%%%%%%%%%%%%
%%%%%%%%%%####%%%#+=---------=#%%%%%%%##%%#%%%%%
%%%%%%%%########=-----------*##%#%%%%###%%%%
%%%%%%%#####%%*=---++++++----=%%%%%%####%%%%
%%%%%%%####%%*--+*%%%%%%#*--=%%%%#%%##%%%%
@%%%%%####%%*--+%%@@%%%%#--=%%#%%%#%%%%%
%#%% %%%%%####%%*-===#%%%%%+====%%#%%%%%%%% %%%%
%##%%% %%%%%%%%%#====+*%%#+====+%%%%%%%%%@%%%%%%
%%%##%@@%%%%%%%%%#+==*%%%##+==##%%%%%%%%@@%%%%%%
%%%%%%%%@@@@%%@@@@%%#******##%%@@@@%%@@@%%%%%%%%
%%%%###%%%%@@@@@@@@@%++++++%@@@@@@@@@@%%####%#%%
%#%%####%%%@@@@@@@@@@%%@@@@@@@@@@%%%##%%%##%
%%%%###%%#%%%%@@@@@@@@@@@@@@@@%%%%%%%%%%%%%%
%#%##%%##%%%%@@@@@@@@@@@@%%%%%%%%%%%%%
%%%#########%%%@@@@@@@@%%%###%%%###%%%
%###########%@#*****%%#####%%###%%
%%%%%####%%#+==++==+*%%%%###%%%%
%%%####%%*======+=+%%%%###%%
%%##%%*=+==-=#=+%%##%#%
%%%%%+=++===*==%%#%%@
%#%========== %%@
======

367
src/api/brain/index.ts Normal file
View File

@@ -0,0 +1,367 @@
/**
* Brain API Layer
*
* Low-level HTTP API calls to the CodeTyper Brain service
*/
import got from "got";
import {
BRAIN_DEFAULTS,
BRAIN_ENDPOINTS,
BRAIN_TIMEOUTS,
BRAIN_HEADERS,
} from "@constants/brain";
import type {
BrainHealthResponse,
BrainLoginResponse,
BrainRegisterResponse,
BrainRecallRequest,
BrainRecallResponse,
BrainLearnConceptRequest,
BrainApiResponse,
BrainConcept,
BrainContextRequest,
BrainContextResponse,
BrainExtractRequest,
BrainExtractResponse,
BrainMemorySearchRequest,
BrainMemorySearchResponse,
BrainStoreMemoryRequest,
BrainKnowledgeStats,
BrainMemoryStats,
BrainUser,
} from "@/types/brain";
/**
 * Assemble HTTP headers for a Brain API request.
 *
 * Always sets the JSON content type; attaches the API-key header and/or a
 * Bearer authorization header only when the corresponding credential is
 * provided (truthy).
 */
const buildHeaders = (
  apiKey?: string,
  accessToken?: string,
): Record<string, string> => {
  const headers: Record<string, string> = {
    [BRAIN_HEADERS.CONTENT_TYPE]: "application/json",
  };
  if (apiKey) {
    headers[BRAIN_HEADERS.API_KEY] = apiKey;
  }
  if (accessToken) {
    headers[BRAIN_HEADERS.AUTHORIZATION] = `Bearer ${accessToken}`;
  }
  return headers;
};
/**
 * Resolve the Brain API base URL, preferring the caller-supplied override
 * and falling back to the configured default.
 */
const getBaseUrl = (customUrl?: string): string =>
  customUrl ?? BRAIN_DEFAULTS.BASE_URL;
// ============================================================================
// Health Check
// ============================================================================
/**
 * Ping the Brain service's health endpoint.
 *
 * @param baseUrl - Optional override of the Brain API base URL.
 * @returns The parsed health response from the service.
 */
export const checkHealth = async (
  baseUrl?: string,
): Promise<BrainHealthResponse> =>
  got
    .get(`${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.HEALTH}`, {
      timeout: { request: BRAIN_TIMEOUTS.HEALTH },
    })
    .json<BrainHealthResponse>();
// ============================================================================
// Authentication
// ============================================================================
/**
 * Register a new Brain user account.
 *
 * @param email - Account email address.
 * @param password - Account password.
 * @param displayName - Human-readable name (sent as `display_name`).
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const register = async (
  email: string,
  password: string,
  displayName: string,
  baseUrl?: string,
): Promise<BrainRegisterResponse> =>
  got
    .post(`${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.AUTH_REGISTER}`, {
      json: { email, password, display_name: displayName },
      timeout: { request: BRAIN_TIMEOUTS.AUTH },
    })
    .json<BrainRegisterResponse>();
/**
 * Authenticate with email and password.
 *
 * @param email - Account email address.
 * @param password - Account password.
 * @param baseUrl - Optional override of the Brain API base URL.
 * @returns The login response (access/refresh token payload).
 */
export const login = async (
  email: string,
  password: string,
  baseUrl?: string,
): Promise<BrainLoginResponse> =>
  got
    .post(`${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.AUTH_LOGIN}`, {
      json: { email, password },
      timeout: { request: BRAIN_TIMEOUTS.AUTH },
    })
    .json<BrainLoginResponse>();
/**
 * Logout by revoking the given refresh token on the server.
 *
 * Note: the parameter shadows the sibling `refreshToken` function exported
 * below; inside this body the name refers to the token string.
 *
 * @param refreshToken - Refresh token to revoke.
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const logout = async (
  refreshToken: string,
  baseUrl?: string,
): Promise<void> => {
  await got.post(`${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.AUTH_LOGOUT}`, {
    json: { refresh_token: refreshToken },
    timeout: { request: BRAIN_TIMEOUTS.AUTH },
  });
};
/**
 * Exchange a refresh token for a new access token.
 *
 * @param refreshTokenValue - Current refresh token.
 * @param baseUrl - Optional override of the Brain API base URL.
 * @returns A fresh login response (new token pair).
 */
export const refreshToken = async (
  refreshTokenValue: string,
  baseUrl?: string,
): Promise<BrainLoginResponse> =>
  got
    .post(`${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.AUTH_REFRESH}`, {
      json: { refresh_token: refreshTokenValue },
      timeout: { request: BRAIN_TIMEOUTS.AUTH },
    })
    .json<BrainLoginResponse>();
/**
 * Fetch the currently authenticated user's profile.
 *
 * @param accessToken - JWT access token sent as a Bearer authorization header.
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const getCurrentUser = async (
  accessToken: string,
  baseUrl?: string,
): Promise<BrainApiResponse<BrainUser>> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.AUTH_ME}`;
  const response = await got
    .get(url, {
      // Plain property instead of the original redundant
      // `...{ headers: ... }` spread of a one-key object literal.
      headers: buildHeaders(undefined, accessToken),
      timeout: { request: BRAIN_TIMEOUTS.AUTH },
    })
    .json<BrainApiResponse<BrainUser>>();
  return response;
};
// ============================================================================
// Knowledge Graph
// ============================================================================
/**
 * Recall relevant concepts from the knowledge graph.
 *
 * @param request - Recall query payload.
 * @param apiKey - Brain API key (sent via the api-key header).
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const recallKnowledge = async (
  request: BrainRecallRequest,
  apiKey: string,
  baseUrl?: string,
): Promise<BrainRecallResponse> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.KNOWLEDGE_RECALL}`;
  const response = await got
    .post(url, {
      // Plain property instead of the redundant `...{ headers: ... }` spread.
      headers: buildHeaders(apiKey),
      json: request,
      timeout: { request: BRAIN_TIMEOUTS.KNOWLEDGE },
    })
    .json<BrainRecallResponse>();
  return response;
};
/**
 * Learn/store a concept in the knowledge graph.
 *
 * @param request - Concept payload to persist.
 * @param apiKey - Brain API key (sent via the api-key header).
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const learnConcept = async (
  request: BrainLearnConceptRequest,
  apiKey: string,
  baseUrl?: string,
): Promise<BrainApiResponse<BrainConcept>> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.KNOWLEDGE_LEARN}`;
  const response = await got
    .post(url, {
      // Plain property instead of the redundant `...{ headers: ... }` spread.
      headers: buildHeaders(apiKey),
      json: request,
      timeout: { request: BRAIN_TIMEOUTS.KNOWLEDGE },
    })
    .json<BrainApiResponse<BrainConcept>>();
  return response;
};
/**
 * Build a context string for prompt injection.
 *
 * @param request - Context-building parameters.
 * @param apiKey - Brain API key (sent via the api-key header).
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const buildContext = async (
  request: BrainContextRequest,
  apiKey: string,
  baseUrl?: string,
): Promise<BrainContextResponse> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.KNOWLEDGE_CONTEXT}`;
  const response = await got
    .post(url, {
      // Plain property instead of the redundant `...{ headers: ... }` spread.
      headers: buildHeaders(apiKey),
      json: request,
      timeout: { request: BRAIN_TIMEOUTS.KNOWLEDGE },
    })
    .json<BrainContextResponse>();
  return response;
};
/**
 * Extract concepts from text content.
 *
 * Uses the longer EXTRACT timeout since extraction is the slowest operation.
 *
 * @param request - Text/content to analyze.
 * @param apiKey - Brain API key (sent via the api-key header).
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const extractConcepts = async (
  request: BrainExtractRequest,
  apiKey: string,
  baseUrl?: string,
): Promise<BrainExtractResponse> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.KNOWLEDGE_EXTRACT}`;
  const response = await got
    .post(url, {
      // Plain property instead of the redundant `...{ headers: ... }` spread.
      headers: buildHeaders(apiKey),
      json: request,
      timeout: { request: BRAIN_TIMEOUTS.EXTRACT },
    })
    .json<BrainExtractResponse>();
  return response;
};
/**
 * Get knowledge stats for a project.
 *
 * @param projectId - Project whose stats to fetch (query parameter).
 * @param apiKey - Brain API key (sent via the api-key header).
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const getKnowledgeStats = async (
  projectId: number,
  apiKey: string,
  baseUrl?: string,
): Promise<BrainApiResponse<BrainKnowledgeStats>> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.KNOWLEDGE_STATS}?project_id=${projectId}`;
  const response = await got
    .get(url, {
      // Plain property instead of the redundant `...{ headers: ... }` spread.
      headers: buildHeaders(apiKey),
      timeout: { request: BRAIN_TIMEOUTS.KNOWLEDGE },
    })
    .json<BrainApiResponse<BrainKnowledgeStats>>();
  return response;
};
/**
 * List all concepts for a project.
 *
 * @param projectId - Project whose concepts to list (query parameter).
 * @param apiKey - Brain API key (sent via the api-key header).
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const listConcepts = async (
  projectId: number,
  apiKey: string,
  baseUrl?: string,
): Promise<BrainApiResponse<BrainConcept[]>> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.KNOWLEDGE_CONCEPTS}?project_id=${projectId}`;
  const response = await got
    .get(url, {
      // Plain property instead of the redundant `...{ headers: ... }` spread.
      headers: buildHeaders(apiKey),
      timeout: { request: BRAIN_TIMEOUTS.KNOWLEDGE },
    })
    .json<BrainApiResponse<BrainConcept[]>>();
  return response;
};
// ============================================================================
// Memory
// ============================================================================
/**
 * Search for relevant memories.
 *
 * @param request - Memory search query payload.
 * @param apiKey - Brain API key (sent via the api-key header).
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const searchMemories = async (
  request: BrainMemorySearchRequest,
  apiKey: string,
  baseUrl?: string,
): Promise<BrainMemorySearchResponse> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.MEMORY_SEARCH}`;
  const response = await got
    .post(url, {
      // Plain property instead of the redundant `...{ headers: ... }` spread.
      headers: buildHeaders(apiKey),
      json: request,
      timeout: { request: BRAIN_TIMEOUTS.MEMORY },
    })
    .json<BrainMemorySearchResponse>();
  return response;
};
/**
 * Store a memory.
 *
 * @param request - Memory payload to persist.
 * @param apiKey - Brain API key (sent via the api-key header).
 * @param baseUrl - Optional override of the Brain API base URL.
 * @returns API response containing the new memory's id.
 */
export const storeMemory = async (
  request: BrainStoreMemoryRequest,
  apiKey: string,
  baseUrl?: string,
): Promise<BrainApiResponse<{ id: number }>> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.MEMORY_STORE}`;
  const response = await got
    .post(url, {
      // Plain property instead of the redundant `...{ headers: ... }` spread.
      headers: buildHeaders(apiKey),
      json: request,
      timeout: { request: BRAIN_TIMEOUTS.MEMORY },
    })
    .json<BrainApiResponse<{ id: number }>>();
  return response;
};
/**
 * Get memory stats.
 *
 * @param apiKey - Brain API key (sent via the api-key header).
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const getMemoryStats = async (
  apiKey: string,
  baseUrl?: string,
): Promise<BrainMemoryStats> => {
  const url = `${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.MEMORY_STATS}`;
  const response = await got
    .get(url, {
      // Plain property instead of the redundant `...{ headers: ... }` spread.
      headers: buildHeaders(apiKey),
      timeout: { request: BRAIN_TIMEOUTS.MEMORY },
    })
    .json<BrainMemoryStats>();
  return response;
};
/**
 * Check memory subsystem availability (unauthenticated status endpoint).
 *
 * @param baseUrl - Optional override of the Brain API base URL.
 */
export const checkMemoryStatus = async (
  baseUrl?: string,
): Promise<BrainApiResponse<{ available: boolean }>> =>
  got
    .get(`${getBaseUrl(baseUrl)}${BRAIN_ENDPOINTS.MEMORY_STATUS}`, {
      timeout: { request: BRAIN_TIMEOUTS.MEMORY },
    })
    .json<BrainApiResponse<{ available: boolean }>>();

View File

@@ -7,3 +7,4 @@
export * as copilotApi from "@api/copilot";
export * as ollamaApi from "@api/ollama";
export * as brainApi from "@api/brain";

View File

@@ -1,6 +1,7 @@
import { tui, appStore } from "@tui/index";
import { getProviderInfo } from "@services/chat-tui-service";
import { addServer, connectServer } from "@services/mcp/index";
import * as brainService from "@services/brain";
import type { ChatServiceState } from "@services/chat-tui-service";
import type { AgentConfig } from "@/types/agent-config";
import type { PermissionScope, LearningScope } from "@/types/tui";
@@ -32,6 +33,9 @@ export interface RenderAppProps {
scope?: LearningScope,
editedContent?: string,
) => void;
handleBrainSetJwtToken?: (jwtToken: string) => Promise<void>;
handleBrainSetApiKey?: (apiKey: string) => Promise<void>;
handleBrainLogout?: () => Promise<void>;
handleExit: () => void;
showBanner: boolean;
state: ChatServiceState;
@@ -65,6 +69,42 @@ const defaultHandleMCPAdd = async (data: MCPAddFormData): Promise<void> => {
await connectServer(data.name);
};
const defaultHandleBrainSetJwtToken = async (jwtToken: string): Promise<void> => {
await brainService.setJwtToken(jwtToken);
const connected = await brainService.connect();
if (connected) {
const state = brainService.getState();
appStore.setBrainStatus("connected");
appStore.setBrainUser(state.user);
appStore.setBrainCounts(state.knowledgeCount, state.memoryCount);
appStore.setBrainShowBanner(false);
} else {
throw new Error("Failed to connect with the provided JWT token.");
}
};
const defaultHandleBrainSetApiKey = async (apiKey: string): Promise<void> => {
await brainService.setApiKey(apiKey);
const connected = await brainService.connect();
if (connected) {
const state = brainService.getState();
appStore.setBrainStatus("connected");
appStore.setBrainUser(state.user);
appStore.setBrainCounts(state.knowledgeCount, state.memoryCount);
appStore.setBrainShowBanner(false);
} else {
throw new Error("Failed to connect with the provided API key.");
}
};
const defaultHandleBrainLogout = async (): Promise<void> => {
await brainService.logout();
appStore.setBrainStatus("disconnected");
appStore.setBrainUser(null);
appStore.setBrainCounts(0, 0);
appStore.setBrainShowBanner(true);
};
export const renderApp = async (props: RenderAppProps): Promise<void> => {
const { displayName, model: defaultModel } = getProviderInfo(
props.state.provider,
@@ -95,6 +135,9 @@ export const renderApp = async (props: RenderAppProps): Promise<void> => {
onMCPAdd: props.handleMCPAdd ?? defaultHandleMCPAdd,
onPermissionResponse: props.handlePermissionResponse ?? (() => {}),
onLearningResponse: props.handleLearningResponse ?? (() => {}),
onBrainSetJwtToken: props.handleBrainSetJwtToken ?? defaultHandleBrainSetJwtToken,
onBrainSetApiKey: props.handleBrainSetApiKey ?? defaultHandleBrainSetApiKey,
onBrainLogout: props.handleBrainLogout ?? defaultHandleBrainLogout,
plan: props.plan,
});

View File

@@ -0,0 +1,66 @@
/**
* Agent definition constants
*/
/** File conventions and validation limits for agent definition files. */
export const AGENT_DEFINITION = {
  FILE_EXTENSION: ".md",
  DIRECTORY_NAME: "agents",
  FRONTMATTER_DELIMITER: "---", // YAML frontmatter fence inside the .md file
  MAX_NAME_LENGTH: 50,
  MAX_DESCRIPTION_LENGTH: 500,
  MAX_TOOLS: 20,
  MAX_TRIGGER_PHRASES: 10,
} as const;

/** Lookup locations for agent definitions: project-local, per-user, built-in. */
export const AGENT_DEFINITION_PATHS = {
  PROJECT: ".codetyper/agents",
  GLOBAL: "~/.config/codetyper/agents",
  BUILTIN: "src/agents",
} as const;

/** Default tool sets granted to each built-in agent role. */
export const AGENT_DEFAULT_TOOLS = {
  EXPLORE: ["read", "glob", "grep"],
  PLAN: ["read", "glob", "grep", "web_search"],
  CODE: ["read", "write", "edit", "glob", "grep", "bash"],
  REVIEW: ["read", "glob", "grep", "lsp"],
  BASH: ["bash", "read"],
} as const;

/** ANSI escape sequences for colorizing agent output in the terminal. */
export const AGENT_COLORS = {
  RED: "\x1b[31m",
  GREEN: "\x1b[32m",
  BLUE: "\x1b[34m",
  YELLOW: "\x1b[33m",
  CYAN: "\x1b[36m",
  MAGENTA: "\x1b[35m",
  WHITE: "\x1b[37m",
  GRAY: "\x1b[90m",
  RESET: "\x1b[0m",
} as const;

/**
 * Model/turn/timeout settings per agent tier.
 * NOTE(review): timeout values appear to be milliseconds — confirm with consumers.
 */
export const AGENT_TIER_CONFIG = {
  fast: {
    model: "gpt-4o-mini",
    maxTurns: 5,
    timeout: 30000,
  },
  balanced: {
    model: "gpt-4o",
    maxTurns: 10,
    timeout: 60000,
  },
  thorough: {
    model: "o1",
    maxTurns: 20,
    timeout: 120000,
  },
} as const;

/** User-facing messages for agent definition loading and validation. */
export const AGENT_MESSAGES = {
  LOADING: "Loading agent definitions...",
  LOADED: "Agent definitions loaded",
  NOT_FOUND: "Agent definition not found",
  INVALID_FRONTMATTER: "Invalid YAML frontmatter",
  MISSING_REQUIRED: "Missing required field",
  INVALID_TOOL: "Invalid tool specified",
} as const;

View File

@@ -0,0 +1,100 @@
/**
* Apply Patch Constants
*
* Configuration for unified diff parsing and application.
*/
/**
 * Default configuration for patch application.
 */
export const PATCH_DEFAULTS = {
  FUZZ: 2, // NOTE(review): presumably allowed context-line drift per hunk — confirm against matcher
  MAX_FUZZ: 3,
  IGNORE_WHITESPACE: false,
  IGNORE_CASE: false,
  CONTEXT_LINES: 3,
} as const;

/**
 * Regular expressions recognizing unified-diff and git-diff line formats.
 */
export const PATCH_PATTERNS = {
  HUNK_HEADER: /^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)$/,
  FILE_HEADER_OLD: /^--- (.+?)(?:\t.*)?$/,
  FILE_HEADER_NEW: /^\+\+\+ (.+?)(?:\t.*)?$/,
  GIT_DIFF: /^diff --git a\/(.+) b\/(.+)$/,
  INDEX_LINE: /^index [a-f0-9]+\.\.[a-f0-9]+(?: \d+)?$/,
  BINARY_FILE: /^Binary files .+ differ$/,
  NEW_FILE: /^new file mode \d+$/,
  DELETED_FILE: /^deleted file mode \d+$/,
  RENAME_FROM: /^rename from (.+)$/,
  RENAME_TO: /^rename to (.+)$/,
  NO_NEWLINE: /^\\ No newline at end of file$/,
} as const;

/**
 * Single-character prefixes marking line types inside a hunk body.
 */
export const LINE_PREFIXES = {
  CONTEXT: " ",
  ADDITION: "+",
  DELETION: "-",
} as const;

/**
 * Error messages; parameterized ones are factory functions.
 */
export const PATCH_ERRORS = {
  INVALID_PATCH: "Invalid patch format",
  PARSE_FAILED: (detail: string) => `Failed to parse patch: ${detail}`,
  HUNK_FAILED: (index: number, reason: string) =>
    `Hunk #${index + 1} failed: ${reason}`, // index is 0-based; message is 1-based
  FILE_NOT_FOUND: (path: string) => `Target file not found: ${path}`,
  CONTEXT_MISMATCH: (line: number) =>
    `Context mismatch at line ${line}`,
  FUZZY_MATCH_FAILED: (hunk: number) =>
    `Could not find match for hunk #${hunk + 1} even with fuzzy matching`,
  ALREADY_APPLIED: "Patch appears to be already applied",
  REVERSED_PATCH: "Patch appears to be reversed",
  BINARY_NOT_SUPPORTED: "Binary patches are not supported",
  WRITE_FAILED: (path: string, error: string) =>
    `Failed to write patched file ${path}: ${error}`,
} as const;

/**
 * Success / progress messages.
 */
export const PATCH_MESSAGES = {
  PARSING: "Parsing patch...",
  APPLYING: (file: string) => `Applying patch to ${file}`,
  APPLIED: (files: number, hunks: number) =>
    `Successfully applied ${hunks} hunk(s) to ${files} file(s)`,
  DRY_RUN: (files: number, hunks: number) =>
    `Dry run: ${hunks} hunk(s) would be applied to ${files} file(s)`,
  FUZZY_APPLIED: (hunk: number, offset: number) =>
    `Hunk #${hunk + 1} applied with fuzzy offset of ${offset}`,
  ROLLBACK_AVAILABLE: "Rollback is available if needed",
  SKIPPED_BINARY: (file: string) => `Skipped binary file: ${file}`,
} as const;

/**
 * Tool result titles (shown in the UI).
 */
export const PATCH_TITLES = {
  APPLYING: (file: string) => `Patching: ${file}`,
  SUCCESS: (files: number) => `Patched ${files} file(s)`,
  PARTIAL: (success: number, failed: number) =>
    `Partial success: ${success} patched, ${failed} failed`,
  FAILED: "Patch failed",
  DRY_RUN: "Patch dry run",
  VALIDATING: "Validating patch",
} as const;

/**
 * Special path values used in diff headers.
 */
export const SPECIAL_PATHS = {
  DEV_NULL: "/dev/null", // marks file creation/deletion in unified diffs
  A_PREFIX: "a/",
  B_PREFIX: "b/",
} as const;

View File

@@ -0,0 +1,62 @@
/**
* Background task constants
*/
/** Core limits and timings for background task execution (times in ms). */
export const BACKGROUND_TASK = {
  MAX_CONCURRENT: 3,
  DEFAULT_TIMEOUT: 300000, // 5 minutes
  MAX_TIMEOUT: 3600000, // 1 hour
  POLL_INTERVAL: 1000, // 1 second
  MAX_RETRIES: 3,
  RETRY_DELAY: 5000, // 5 seconds
  HISTORY_LIMIT: 100, // max completed tasks kept in history
} as const;

/** On-disk persistence settings for task state. */
export const BACKGROUND_TASK_STORAGE = {
  DIRECTORY: ".codetyper/tasks",
  FILE_EXTENSION: ".json",
  MAX_FILE_SIZE: 10 * 1024 * 1024, // 10MB
} as const;

/** Keyboard shortcuts for background task actions. */
export const BACKGROUND_TASK_SHORTCUTS = {
  START: "ctrl+b",
  LIST: "ctrl+shift+b",
  CANCEL: "ctrl+shift+c",
  PAUSE: "ctrl+shift+p",
  RESUME: "ctrl+shift+r",
} as const;

/** Slash commands for background task management. */
export const BACKGROUND_TASK_COMMANDS = {
  START: "/background",
  LIST: "/tasks",
  CANCEL: "/task cancel",
  STATUS: "/task status",
  CLEAR: "/task clear",
} as const;

/** Status icons keyed by task state (unicode escapes for portability). */
export const BACKGROUND_TASK_STATUS_ICONS = {
  pending: "\u23F3", // hourglass
  running: "\u25B6", // play
  paused: "\u23F8", // pause
  completed: "\u2705", // check
  failed: "\u274C", // cross
  cancelled: "\u23F9", // stop
} as const;

/** User-facing status messages for task lifecycle events. */
export const BACKGROUND_TASK_MESSAGES = {
  STARTED: "Task started in background",
  COMPLETED: "Background task completed",
  FAILED: "Background task failed",
  CANCELLED: "Background task cancelled",
  PAUSED: "Background task paused",
  RESUMED: "Background task resumed",
  QUEUE_FULL: "Task queue is full",
  NOT_FOUND: "Task not found",
  ALREADY_RUNNING: "Task is already running",
} as const;

/** Default notification channel toggles. */
export const BACKGROUND_TASK_NOTIFICATIONS = {
  SOUND_ENABLED: true,
  DESKTOP_ENABLED: true,
  INLINE_ENABLED: true,
} as const;

View File

@@ -0,0 +1,107 @@
/**
* Brain Cloud Sync Constants
*
* Configuration for cloud synchronization of brain data.
*/
import type { CloudBrainConfig } from "@/types/brain-cloud";
/**
 * Default cloud configuration.
 * NOTE(review): the explicit `: CloudBrainConfig` annotation widens the
 * literal, so the trailing `as const` has no effect here — consider
 * `satisfies CloudBrainConfig` if literal types are wanted.
 */
export const CLOUD_BRAIN_DEFAULTS: CloudBrainConfig = {
  enabled: false,
  endpoint: "https://brain.codetyper.dev/api/v1",
  syncOnSessionEnd: true,
  syncInterval: 300000, // 5 minutes
  conflictStrategy: "local-wins",
  retryAttempts: 3,
  retryDelay: 1000,
} as const;

/**
 * Cloud API endpoints (relative to the configured endpoint).
 */
export const CLOUD_ENDPOINTS = {
  PUSH: "/sync/push",
  PULL: "/sync/pull",
  STATUS: "/sync/status",
  CONFLICTS: "/sync/conflicts",
  RESOLVE: "/sync/resolve",
  HEALTH: "/health",
} as const;

/**
 * Sync configuration: batching, queue limits, and local storage keys.
 */
export const SYNC_CONFIG = {
  MAX_BATCH_SIZE: 100,
  MAX_QUEUE_SIZE: 1000,
  STALE_ITEM_AGE_MS: 86400000, // 24 hours
  VERSION_KEY: "brain_sync_version",
  QUEUE_KEY: "brain_offline_queue",
} as const;

/**
 * Error messages; parameterized ones are factory functions.
 */
export const CLOUD_ERRORS = {
  NOT_CONFIGURED: "Cloud sync is not configured",
  OFFLINE: "Device is offline",
  SYNC_IN_PROGRESS: "Sync already in progress",
  PUSH_FAILED: (error: string) => `Push failed: ${error}`,
  PULL_FAILED: (error: string) => `Pull failed: ${error}`,
  CONFLICT_UNRESOLVED: (count: number) =>
    `${count} conflict(s) require manual resolution`,
  QUEUE_FULL: "Offline queue is full",
  VERSION_MISMATCH: "Version mismatch - full sync required",
  AUTH_REQUIRED: "Authentication required for cloud sync",
  INVALID_RESPONSE: "Invalid response from server",
} as const;

/**
 * Status/progress messages shown during sync.
 */
export const CLOUD_MESSAGES = {
  STARTING_SYNC: "Starting cloud sync...",
  PUSHING: (count: number) => `Pushing ${count} change(s)...`,
  PULLING: (count: number) => `Pulling ${count} change(s)...`,
  RESOLVING_CONFLICTS: (count: number) => `Resolving ${count} conflict(s)...`,
  SYNC_COMPLETE: "Cloud sync complete",
  SYNC_SKIPPED: "No changes to sync",
  QUEUED_OFFLINE: (count: number) => `Queued ${count} change(s) for later sync`,
  RETRYING: (attempt: number, max: number) =>
    `Retrying sync (${attempt}/${max})...`,
} as const;

/**
 * Titles for UI panels/toasts.
 */
export const CLOUD_TITLES = {
  SYNCING: "Syncing with cloud",
  SYNCED: "Cloud sync complete",
  OFFLINE: "Offline - changes queued",
  CONFLICT: "Sync conflicts",
  ERROR: "Sync failed",
} as const;

/**
 * Human-readable labels for each conflict-resolution strategy.
 */
export const CONFLICT_LABELS = {
  "local-wins": "Keep local version",
  "remote-wins": "Use remote version",
  manual: "Resolve manually",
  merge: "Attempt to merge",
} as const;

/**
 * HTTP request configuration for cloud calls.
 */
export const CLOUD_HTTP_CONFIG = {
  TIMEOUT_MS: 30000,
  HEADERS: {
    "Content-Type": "application/json",
    "X-Client": "codetyper-cli",
  },
} as const;

View File

@@ -0,0 +1,75 @@
/**
* Brain MCP Server constants
*/
/** Server binding and connection settings (times in ms). */
export const BRAIN_MCP_SERVER = {
  DEFAULT_PORT: 5002,
  DEFAULT_HOST: "localhost",
  REQUEST_TIMEOUT: 30000,
  MAX_CONNECTIONS: 100,
  HEARTBEAT_INTERVAL: 30000,
} as const;

/** Per-client rate-limit window settings. */
export const BRAIN_MCP_RATE_LIMIT = {
  ENABLED: true,
  MAX_REQUESTS: 100,
  WINDOW_MS: 60000, // 1 minute
  BLOCK_DURATION: 300000, // 5 minutes
} as const;

/** Authentication header/token conventions. */
export const BRAIN_MCP_AUTH = {
  HEADER: "X-Brain-API-Key",
  TOKEN_PREFIX: "Bearer",
  SESSION_DURATION: 3600000, // 1 hour
} as const;

/** Slash commands for controlling the MCP server. */
export const BRAIN_MCP_COMMANDS = {
  START: "/brain mcp start",
  STOP: "/brain mcp stop",
  STATUS: "/brain mcp status",
  LOGS: "/brain mcp logs",
  CONFIG: "/brain mcp config",
} as const;

/** Tool names exposed over MCP. */
export const BRAIN_MCP_TOOL_NAMES = {
  RECALL: "brain_recall",
  LEARN: "brain_learn",
  SEARCH: "brain_search",
  RELATE: "brain_relate",
  CONTEXT: "brain_context",
  STATS: "brain_stats",
  PROJECTS: "brain_projects",
} as const;

/** Log/status messages for server and client lifecycle events. */
export const BRAIN_MCP_MESSAGES = {
  SERVER_STARTED: "Brain MCP server started",
  SERVER_STOPPED: "Brain MCP server stopped",
  SERVER_ALREADY_RUNNING: "Brain MCP server is already running",
  SERVER_NOT_RUNNING: "Brain MCP server is not running",
  CLIENT_CONNECTED: "MCP client connected",
  CLIENT_DISCONNECTED: "MCP client disconnected",
  TOOL_EXECUTED: "Tool executed successfully",
  TOOL_FAILED: "Tool execution failed",
  UNAUTHORIZED: "Unauthorized request",
  RATE_LIMITED: "Rate limit exceeded",
  INVALID_REQUEST: "Invalid MCP request",
} as const;

/**
 * JSON-RPC error objects. Codes -32700..-32600 follow the JSON-RPC 2.0
 * spec; -32001..-32004 are server-defined extensions.
 */
export const BRAIN_MCP_ERRORS = {
  PARSE_ERROR: { code: -32700, message: "Parse error" },
  INVALID_REQUEST: { code: -32600, message: "Invalid request" },
  METHOD_NOT_FOUND: { code: -32601, message: "Method not found" },
  INVALID_PARAMS: { code: -32602, message: "Invalid params" },
  INTERNAL_ERROR: { code: -32603, message: "Internal error" },
  TOOL_NOT_FOUND: { code: -32001, message: "Tool not found" },
  UNAUTHORIZED: { code: -32002, message: "Unauthorized" },
  RATE_LIMITED: { code: -32003, message: "Rate limited" },
  BRAIN_UNAVAILABLE: { code: -32004, message: "Brain service unavailable" },
} as const;

/** Numeric log levels, ascending in severity. */
export const BRAIN_MCP_LOG_LEVELS = {
  DEBUG: 0,
  INFO: 1,
  WARN: 2,
  ERROR: 3,
} as const;

View File

@@ -0,0 +1,69 @@
/**
* Multi-project Brain constants
*/
/** Validation limits and defaults for brain projects. */
export const BRAIN_PROJECT = {
  MAX_PROJECTS: 100,
  NAME_MIN_LENGTH: 2,
  NAME_MAX_LENGTH: 100,
  DESCRIPTION_MAX_LENGTH: 500,
  DEFAULT_RECALL_LIMIT: 5,
  DEFAULT_SYNC_INTERVAL: 30, // minutes
} as const;

/** File names/extensions for project config, exports, and backups. */
export const BRAIN_PROJECT_STORAGE = {
  CONFIG_FILE: "brain-projects.json",
  EXPORT_EXTENSION: ".brain-export.json",
  BACKUP_EXTENSION: ".brain-backup.json",
} as const;

/** On-disk locations for brain project data. */
export const BRAIN_PROJECT_PATHS = {
  LOCAL: ".codetyper/brain",
  GLOBAL: "~/.local/share/codetyper/brain",
  EXPORTS: "~/.local/share/codetyper/brain/exports",
  BACKUPS: "~/.local/share/codetyper/brain/backups",
} as const;

/** Slash commands for project management. */
export const BRAIN_PROJECT_COMMANDS = {
  LIST: "/brain projects",
  CREATE: "/brain project create",
  SWITCH: "/brain project switch",
  DELETE: "/brain project delete",
  EXPORT: "/brain project export",
  IMPORT: "/brain project import",
  SYNC: "/brain project sync",
} as const;

/** REST routes (`:id` is a path parameter placeholder). */
export const BRAIN_PROJECT_API = {
  LIST: "/api/projects",
  CREATE: "/api/projects",
  GET: "/api/projects/:id",
  UPDATE: "/api/projects/:id",
  DELETE: "/api/projects/:id",
  SWITCH: "/api/projects/:id/switch",
  EXPORT: "/api/projects/:id/export",
  IMPORT: "/api/projects/import",
  SYNC: "/api/projects/:id/sync",
} as const;

/** User-facing result/error messages for project operations. */
export const BRAIN_PROJECT_MESSAGES = {
  CREATED: "Brain project created successfully",
  SWITCHED: "Switched to project",
  DELETED: "Brain project deleted",
  EXPORTED: "Brain project exported",
  IMPORTED: "Brain project imported",
  SYNCED: "Brain project synced",
  NOT_FOUND: "Brain project not found",
  ALREADY_EXISTS: "Project with this name already exists",
  INVALID_NAME: "Invalid project name",
  SWITCH_FAILED: "Failed to switch project",
  EXPORT_FAILED: "Failed to export project",
  IMPORT_FAILED: "Failed to import project",
} as const;

/** Default per-project feature toggles. */
export const BRAIN_PROJECT_DEFAULTS = {
  AUTO_LEARN: true,
  AUTO_RECALL: true,
  CONTEXT_INJECTION: true,
  SYNC_ENABLED: false,
} as const;

94
src/constants/brain.ts Normal file
View File

@@ -0,0 +1,94 @@
/**
* Brain API Constants
*
* Configuration constants for the CodeTyper Brain service
*/
/**
 * Feature flag to disable all Brain functionality.
 * Set to true to hide Brain menu, disable Brain API calls,
 * and remove Brain-related UI elements.
 */
export const BRAIN_DISABLED = true;

// Provider identifier and human-readable display name.
export const BRAIN_PROVIDER_NAME = "brain" as const;
export const BRAIN_DISPLAY_NAME = "CodeTyper Brain";

/** Default connection settings for a locally-running Brain service. */
export const BRAIN_DEFAULTS = {
  BASE_URL: "http://localhost:5001",
  PROJECT_ID: 1,
} as const;

/** REST endpoint paths, relative to the base URL. */
export const BRAIN_ENDPOINTS = {
  // Health
  HEALTH: "/",
  // Authentication
  AUTH_REGISTER: "/auth/register",
  AUTH_LOGIN: "/auth/login",
  AUTH_LOGOUT: "/auth/logout",
  AUTH_REFRESH: "/auth/refresh",
  AUTH_ME: "/auth/me",
  // Knowledge Graph
  KNOWLEDGE_LEARN: "/api/knowledge/learn",
  KNOWLEDGE_RECALL: "/api/knowledge/recall",
  KNOWLEDGE_RELATE: "/api/knowledge/relate",
  KNOWLEDGE_EXTRACT: "/api/knowledge/extract",
  KNOWLEDGE_CONTEXT: "/api/knowledge/context",
  KNOWLEDGE_CONCEPTS: "/api/knowledge/concepts",
  KNOWLEDGE_STATS: "/api/knowledge/stats",
  // Memory
  MEMORY_STATUS: "/api/memory/status",
  MEMORY_STATS: "/api/memory/stats",
  MEMORY_SEARCH: "/api/memory/search",
  MEMORY_STORE: "/api/memory/store",
  MEMORY_TOP: "/api/memory/top",
  MEMORY_FEEDBACK: "/api/memory/feedback",
  // GraphQL (unified endpoint)
  GRAPHQL: "/graphql",
} as const;

/** Per-operation request timeouts in milliseconds. */
export const BRAIN_TIMEOUTS = {
  HEALTH: 3000,
  AUTH: 10000,
  KNOWLEDGE: 15000,
  MEMORY: 10000,
  EXTRACT: 30000, // extraction is the slowest operation
} as const;

/** User-facing error strings. */
export const BRAIN_ERRORS = {
  NOT_RUNNING: "Brain service not available. Start the API server at localhost:5001",
  NOT_AUTHENTICATED: "Not authenticated. Please login or set an API key.",
  INVALID_API_KEY: "Invalid API key. Please check your credentials.",
  CONNECTION_FAILED: "Failed to connect to Brain service.",
  RECALL_FAILED: "Failed to recall knowledge from Brain.",
  LEARN_FAILED: "Failed to store knowledge in Brain.",
  EXTRACT_FAILED: "Failed to extract concepts from content.",
} as const;

/** User-facing status strings. */
export const BRAIN_MESSAGES = {
  CONNECTED: "Brain connected",
  CONNECTING: "Connecting to Brain...",
  DISCONNECTED: "Brain disconnected",
  LEARNING: "Learning concept...",
  RECALLING: "Recalling knowledge...",
  EXTRACTING: "Extracting concepts...",
} as const;

/** Content for the Brain promotional banner shown in the TUI. */
export const BRAIN_BANNER = {
  TITLE: "CodeTyper has a Brain!",
  CTA: "Login and get an API key to enable long-term memory",
  URL: "http://localhost:5001",
  LOGIN_URL: "http://localhost:5173/docs/login",
  EMOJI_CONNECTED: "🧠",
  EMOJI_DISCONNECTED: "💤",
} as const;

/** HTTP header names used by the Brain API. */
export const BRAIN_HEADERS = {
  API_KEY: "api-key",
  AUTHORIZATION: "Authorization",
  CONTENT_TYPE: "Content-Type",
} as const;

View File

@@ -0,0 +1,33 @@
/**
* Confidence filtering constants
*/
/** Threshold and batching settings (thresholds are 0-100 percentages). */
export const CONFIDENCE_FILTER = {
  DEFAULT_THRESHOLD: 80,
  MIN_THRESHOLD: 0,
  MAX_THRESHOLD: 100,
  VALIDATION_TIMEOUT: 30000, // ms
  MAX_BATCH_SIZE: 50,
} as const;

/**
 * Weights for combining confidence factors.
 * NOTE(review): weights sum to 1.0 — presumably a weighted average; confirm
 * with the scoring code.
 */
export const CONFIDENCE_WEIGHTS = {
  PATTERN_MATCH: 0.3,
  CONTEXT_RELEVANCE: 0.25,
  SEVERITY_LEVEL: 0.2,
  CODE_ANALYSIS: 0.15,
  HISTORICAL_ACCURACY: 0.1,
} as const;

/** User-facing messages for filtering/validation outcomes. */
export const CONFIDENCE_MESSAGES = {
  BELOW_THRESHOLD: "Filtered out due to low confidence",
  VALIDATION_FAILED: "Confidence adjusted after validation",
  VALIDATION_PASSED: "Confidence validated successfully",
  NO_FACTORS: "No confidence factors available",
} as const;

/** Hex colors used to render confidence levels in the UI. */
export const CONFIDENCE_COLORS = {
  LOW: "#808080",
  MEDIUM: "#FFA500",
  HIGH: "#00FF00",
  CRITICAL: "#FF0000",
} as const;

View File

@@ -0,0 +1,275 @@
/**
* Feature-Dev Workflow Constants
*
* Configuration and prompts for the 7-phase development workflow.
*/
import type { FeatureDevPhase, FeatureDevConfig } from "@/types/feature-dev";
/**
 * Default workflow configuration.
 * NOTE(review): the explicit `: FeatureDevConfig` annotation widens the
 * literal, so the trailing `as const` has no effect — consider
 * `satisfies FeatureDevConfig` if literal types are wanted.
 */
export const FEATURE_DEV_CONFIG: FeatureDevConfig = {
  requireCheckpoints: true,
  autoRunTests: true,
  autoCommit: false,
  maxExplorationDepth: 3,
  parallelExplorations: 3,
} as const;

/**
 * Phase order for workflow progression (array index = step number).
 */
export const PHASE_ORDER: FeatureDevPhase[] = [
  "understand",
  "explore",
  "plan",
  "implement",
  "verify",
  "review",
  "finalize",
] as const;

/**
 * One-line summary of each phase, keyed by phase id.
 */
export const PHASE_DESCRIPTIONS: Record<FeatureDevPhase, string> = {
  understand: "Clarify requirements and gather context",
  explore: "Search codebase for relevant code and patterns",
  plan: "Design the implementation approach",
  implement: "Write the code changes",
  verify: "Run tests and validate changes",
  review: "Self-review the implementation",
  finalize: "Commit changes and cleanup",
} as const;
/**
* Phase prompts for guiding the agent
*/
export const PHASE_PROMPTS: Record<FeatureDevPhase, string> = {
understand: `You are in the UNDERSTAND phase of feature development.
Your goal is to fully understand what needs to be built before writing any code.
Tasks:
1. Analyze the user's feature request
2. Identify unclear or ambiguous requirements
3. Ask clarifying questions if needed
4. Document the understood requirements
Output a summary of:
- What the feature should do
- User-facing behavior
- Technical requirements
- Edge cases to consider
- Any assumptions made
If anything is unclear, ask the user for clarification before proceeding.`,
explore: `You are in the EXPLORE phase of feature development.
Your goal is to understand the existing codebase before making changes.
Tasks:
1. Search for related code using grep and glob
2. Identify files that will need to be modified
3. Understand existing patterns and conventions
4. Find similar implementations to reference
5. Identify potential dependencies or impacts
Run multiple parallel searches to gather context efficiently.
Document your findings:
- Relevant files and their purposes
- Existing patterns to follow
- Code that might be affected
- Useful examples in the codebase`,
plan: `You are in the PLAN phase of feature development.
Your goal is to create a detailed implementation plan before writing code.
Tasks:
1. Design the solution architecture
2. List files to create, modify, or delete
3. Define the order of changes
4. Identify risks and dependencies
5. Plan the testing approach
Create a plan that includes:
- Summary of the approach
- Step-by-step implementation order
- File changes with descriptions
- Potential risks and mitigations
- Test cases to verify the feature
Present this plan for user approval before proceeding.`,
implement: `You are in the IMPLEMENT phase of feature development.
Your goal is to write the code according to the approved plan.
Tasks:
1. Follow the implementation plan step by step
2. Write clean, well-documented code
3. Follow existing code patterns and conventions
4. Create necessary files and make required changes
5. Track all changes made
Guidelines:
- Implement one step at a time
- Test each change locally if possible
- Keep changes focused and minimal
- Add comments for complex logic
- Update imports and exports as needed`,
verify: `You are in the VERIFY phase of feature development.
Your goal is to ensure the implementation works correctly.
Tasks:
1. Run the test suite
2. Add new tests for the feature
3. Fix any failing tests
4. Check for regressions
5. Verify edge cases
Report:
- Test results (pass/fail counts)
- Coverage information if available
- Any issues discovered
- Additional tests needed`,
review: `You are in the REVIEW phase of feature development.
Your goal is to self-review the implementation for quality.
Tasks:
1. Review all changes made
2. Check for code quality issues
3. Verify documentation is complete
4. Look for potential bugs
5. Ensure best practices are followed
Review criteria:
- Code clarity and readability
- Error handling
- Edge cases covered
- Performance considerations
- Security implications
- Documentation completeness
Report any findings that need attention.`,
finalize: `You are in the FINALIZE phase of feature development.
Your goal is to complete the feature implementation.
Tasks:
1. Create a commit with appropriate message
2. Update any documentation
3. Clean up temporary files
4. Prepare summary of changes
Output:
- Final list of changes
- Commit message (if committing)
- Any follow-up tasks recommended
- Success confirmation`,
} as const;
/**
* Checkpoint configuration per phase
*/
export const PHASE_CHECKPOINTS: Record<
FeatureDevPhase,
{ required: boolean; title: string }
> = {
understand: {
required: true,
title: "Requirements Confirmation",
},
explore: {
required: false,
title: "Exploration Summary",
},
plan: {
required: true,
title: "Implementation Plan Approval",
},
implement: {
required: false,
title: "Implementation Progress",
},
verify: {
required: true,
title: "Test Results Review",
},
review: {
required: true,
title: "Code Review Findings",
},
finalize: {
required: true,
title: "Final Approval",
},
} as const;
/**
* Error messages
*/
export const FEATURE_DEV_ERRORS = {
INVALID_PHASE: (phase: string) => `Invalid phase: ${phase}`,
INVALID_TRANSITION: (from: FeatureDevPhase, to: FeatureDevPhase) =>
`Cannot transition from ${from} to ${to}`,
CHECKPOINT_REQUIRED: (phase: FeatureDevPhase) =>
`User approval required for ${phase} phase`,
PHASE_FAILED: (phase: FeatureDevPhase, reason: string) =>
`Phase ${phase} failed: ${reason}`,
WORKFLOW_ABORTED: (reason: string) => `Workflow aborted: ${reason}`,
NO_PLAN: "Cannot implement without an approved plan",
TEST_FAILURE: "Tests failed - review required before proceeding",
} as const;
/**
* Status messages
*/
export const FEATURE_DEV_MESSAGES = {
STARTING: (phase: FeatureDevPhase) => `Starting ${phase} phase...`,
COMPLETED: (phase: FeatureDevPhase) => `Completed ${phase} phase`,
AWAITING_APPROVAL: (phase: FeatureDevPhase) =>
`Awaiting approval for ${phase}`,
CHECKPOINT: (title: string) => `Checkpoint: ${title}`,
EXPLORING: (query: string) => `Exploring: ${query}`,
IMPLEMENTING_STEP: (step: number, total: number) =>
`Implementing step ${step}/${total}`,
RUNNING_TESTS: "Running tests...",
REVIEWING: "Reviewing changes...",
FINALIZING: "Finalizing changes...",
} as const;
/**
* Allowed phase transitions
*/
export const ALLOWED_TRANSITIONS: Record<FeatureDevPhase, FeatureDevPhase[]> = {
understand: ["explore", "plan"], // Can skip explore if simple
explore: ["plan", "understand"], // Can go back to understand
plan: ["implement", "explore", "understand"], // Can go back
implement: ["verify", "plan"], // Can revise plan
verify: ["review", "implement"], // Can fix issues
review: ["finalize", "implement"], // Can fix issues
finalize: [], // Terminal state
} as const;
/**
* Phase timeout configuration (in ms)
*/
export const PHASE_TIMEOUTS: Record<FeatureDevPhase, number> = {
understand: 120000,
explore: 180000,
plan: 120000,
implement: 600000,
verify: 300000,
review: 120000,
finalize: 60000,
} as const;

View File

@@ -89,7 +89,7 @@ export const HELP_TOPICS: HelpTopic[] = [
fullDescription:
"Switch between Agent (full access), Ask (read-only), and Code Review modes.",
usage: "/mode",
shortcuts: ["Ctrl+Tab"],
shortcuts: ["Ctrl+M"],
category: "commands",
},
{
@@ -166,11 +166,11 @@ export const HELP_TOPICS: HelpTopic[] = [
category: "shortcuts",
},
{
id: "shortcut-ctrltab",
name: "Ctrl+Tab",
id: "shortcut-ctrlm",
name: "Ctrl+M",
shortDescription: "Cycle modes",
fullDescription: "Cycle through interaction modes.",
shortcuts: ["Ctrl+Tab"],
shortcuts: ["Ctrl+M"],
category: "shortcuts",
},
];

View File

@@ -1,4 +1,23 @@
export const HOME_VARS = {
title: "Welcome to CodeTyper - Your AI Coding Assistant",
subTitle: "Type a prompt below to start a new session",
subTitle: "Type a prompt below to start",
};
/** CODETYPER text logo */
export const ASCII_LOGO = [
" ██████╗ ██████╗ ██████╗ ███████╗ ████████╗ ██╗ ██╗ ██████╗ ███████╗ ██████╗ ",
"██╔════╝ ██╔═══██╗ ██╔══██╗ ██╔════╝ ╚══██╔══╝ ╚██╗ ██╔╝ ██╔══██╗ ██╔════╝ ██╔══██╗",
"██║ ██║ ██║ ██║ ██║ █████╗ ██║ ╚████╔╝ ██████╔╝ █████╗ ██████╔╝",
"██║ ██║ ██║ ██║ ██║ ██╔══╝ ██║ ╚██╔╝ ██╔═══╝ ██╔══╝ ██╔══██╗",
"╚██████╗ ╚██████╔╝ ██████╔╝ ███████╗ ██║ ██║ ██║ ███████╗ ██║ ██║",
" ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝ ╚═╝ ╚═╝",
];
/** Gradient colors for CODETYPER text - from top to bottom */
export const ASCII_LOGO_GRADIENT = [
"#00FFFF", // Cyan
"#00D4FF", // Light blue
"#00AAFF", // Blue
"#0080FF", // Medium blue
"#0055FF", // Deep blue
"#AA00FF", // Purple
];

View File

@@ -0,0 +1,54 @@
/**
* MultiEdit Tool Constants
*
* Configuration for batch file editing operations
*/
export const MULTI_EDIT_DEFAULTS = {
MAX_EDITS: 50, // Maximum number of edits in a single batch
MAX_FILE_SIZE: 1024 * 1024, // 1MB max file size
} as const;
export const MULTI_EDIT_TITLES = {
VALIDATING: (count: number) => `Validating ${count} edits...`,
APPLYING: (current: number, total: number) =>
`Applying edit ${current}/${total}`,
SUCCESS: (count: number) => `Applied ${count} edits`,
PARTIAL: (success: number, failed: number) =>
`Applied ${success} edits, ${failed} failed`,
FAILED: "Multi-edit failed",
ROLLBACK: "Rolling back changes...",
} as const;
export const MULTI_EDIT_MESSAGES = {
NO_EDITS: "No edits provided",
TOO_MANY_EDITS: (max: number) => `Too many edits (max: ${max})`,
VALIDATION_FAILED: "Validation failed for one or more edits",
ATOMIC_FAILURE: "Atomic edit failed - all changes rolled back",
DUPLICATE_FILE: (path: string) =>
`Multiple edits to same file must be ordered: ${path}`,
OLD_STRING_NOT_FOUND: (path: string, preview: string) =>
`Old string not found in ${path}: "${preview}..."`,
OLD_STRING_NOT_UNIQUE: (path: string, count: number) =>
`Old string found ${count} times in ${path} (must be unique)`,
FILE_NOT_FOUND: (path: string) => `File not found: ${path}`,
FILE_TOO_LARGE: (path: string) => `File too large: ${path}`,
} as const;
export const MULTI_EDIT_DESCRIPTION = `Edit multiple files in a single atomic operation.
Use this tool when you need to:
- Make related changes across multiple files
- Refactor code that spans several files
- Apply consistent changes to many files
All edits are validated before any changes are applied.
If any edit fails validation, no changes are made.
Each edit requires:
- file_path: Absolute path to the file
- old_string: The exact text to find and replace
- new_string: The replacement text
The old_string must be unique in the file. If it appears multiple times,
provide more context to make it unique.`;

108
src/constants/parallel.ts Normal file
View File

@@ -0,0 +1,108 @@
/**
* Parallel Agent Execution Constants
*
* Configuration for concurrent task execution, resource limits,
* and conflict detection.
*/
import type { ResourceLimits, TaskPriority } from "@/types/parallel";
/**
* Default resource limits
*/
export const PARALLEL_DEFAULTS: ResourceLimits = {
maxConcurrentTasks: 5,
maxQueueSize: 50,
defaultTimeout: 60000,
maxRetries: 2,
} as const;
/**
* Priority weights for task ordering
*/
export const PRIORITY_WEIGHTS: Record<TaskPriority, number> = {
critical: 100,
high: 75,
normal: 50,
low: 25,
} as const;
/**
* Task type concurrency limits
* Some task types should have lower concurrency
*/
export const TASK_TYPE_LIMITS = {
explore: 5,
analyze: 4,
execute: 2,
search: 3,
} as const;
/**
* Conflict detection configuration
*/
export const CONFLICT_CONFIG = {
ENABLE_PATH_CONFLICT: true,
CONFLICT_CHECK_TIMEOUT_MS: 5000,
AUTO_RESOLVE_READ_CONFLICTS: true,
} as const;
/**
* Timeout values for different task types
*/
export const TASK_TIMEOUTS = {
explore: 30000,
analyze: 45000,
execute: 120000,
search: 15000,
} as const;
/**
* Error messages for parallel execution
*/
export const PARALLEL_ERRORS = {
QUEUE_FULL: "Task queue is full",
TIMEOUT: (taskId: string) => `Task ${taskId} timed out`,
CONFLICT: (taskId: string, paths: string[]) =>
`Task ${taskId} conflicts with paths: ${paths.join(", ")}`,
MAX_RETRIES: (taskId: string, retries: number) =>
`Task ${taskId} failed after ${retries} retries`,
CANCELLED: (taskId: string) => `Task ${taskId} was cancelled`,
INVALID_TASK: "Invalid task configuration",
EXECUTOR_ABORTED: "Executor was aborted",
} as const;
/**
* Status messages for parallel execution
*/
export const PARALLEL_MESSAGES = {
STARTING: (count: number) => `Starting ${count} parallel task(s)`,
COMPLETED: (success: number, failed: number) =>
`Completed: ${success} successful, ${failed} failed`,
QUEUED: (taskId: string, position: number) =>
`Task ${taskId} queued at position ${position}`,
RUNNING: (taskId: string) => `Running task: ${taskId}`,
WAITING_CONFLICT: (taskId: string) =>
`Task ${taskId} waiting for conflict resolution`,
RETRYING: (taskId: string, attempt: number) =>
`Retrying task ${taskId} (attempt ${attempt})`,
} as const;
/**
* Deduplication configuration
*/
export const DEDUP_CONFIG = {
ENABLE_CONTENT_DEDUP: true,
SIMILARITY_THRESHOLD: 0.95,
MAX_RESULTS_PER_TYPE: 100,
} as const;
/**
* Read-only task types (no conflict with each other)
*/
export const READ_ONLY_TASK_TYPES = new Set(["explore", "analyze", "search"]);
/**
* Modifying task types (conflict with all tasks on same paths)
*/
export const MODIFYING_TASK_TYPES = new Set(["execute"]);

View File

@@ -58,12 +58,18 @@ export const FILES = {
/** Provider credentials (stored in data, not config) */
credentials: join(DIRS.data, "credentials.json"),
/** Environment variables and tokens (API keys, JWT tokens, etc.) */
vars: join(DIRS.config, "vars.json"),
/** Command history */
history: join(DIRS.data, "history.json"),
/** Models cache */
modelsCache: join(DIRS.cache, "models.json"),
/** Copilot token cache */
copilotTokenCache: join(DIRS.cache, "copilot-token.json"),
/** Frecency cache for file/command suggestions */
frecency: join(DIRS.cache, "frecency.json"),

207
src/constants/pr-review.ts Normal file
View File

@@ -0,0 +1,207 @@
/**
* PR Review Toolkit Constants
*
* Configuration for multi-agent code review.
*/
import type {
PRReviewConfig,
ReviewSeverity,
ReviewFindingType,
} from "@/types/pr-review";
/**
* Minimum confidence threshold for reporting findings
* Only report findings with confidence >= 80%
*/
export const MIN_CONFIDENCE_THRESHOLD = 80;
/**
* Default review configuration
*/
export const DEFAULT_REVIEW_CONFIG: PRReviewConfig = {
minConfidence: MIN_CONFIDENCE_THRESHOLD,
reviewers: [
{ name: "security", type: "security", enabled: true, minConfidence: 80 },
{ name: "performance", type: "performance", enabled: true, minConfidence: 80 },
{ name: "style", type: "style", enabled: true, minConfidence: 85 },
{ name: "logic", type: "logic", enabled: true, minConfidence: 80 },
],
security: {
checkInjection: true,
checkXSS: true,
checkAuth: true,
checkSecrets: true,
checkDependencies: true,
},
performance: {
checkComplexity: true,
checkMemory: true,
checkQueries: true,
checkCaching: true,
checkRenders: true,
},
style: {
checkNaming: true,
checkFormatting: true,
checkConsistency: true,
checkComments: true,
},
logic: {
checkEdgeCases: true,
checkNullHandling: true,
checkErrorHandling: true,
checkConcurrency: true,
checkTypes: true,
},
excludePatterns: [
"**/node_modules/**",
"**/*.min.js",
"**/*.bundle.js",
"**/dist/**",
"**/build/**",
"**/*.lock",
"**/package-lock.json",
"**/yarn.lock",
"**/pnpm-lock.yaml",
],
maxFindings: 50,
} as const;
/**
* Severity emoji indicators
*/
export const SEVERITY_ICONS: Record<ReviewSeverity, string> = {
critical: "🔴",
warning: "🟠",
suggestion: "🟡",
nitpick: "🟢",
} as const;
/**
* Severity labels
*/
export const SEVERITY_LABELS: Record<ReviewSeverity, string> = {
critical: "CRITICAL",
warning: "WARNING",
suggestion: "SUGGESTION",
nitpick: "NITPICK",
} as const;
/**
* Finding type labels
*/
export const FINDING_TYPE_LABELS: Record<ReviewFindingType, string> = {
security: "Security",
performance: "Performance",
style: "Style",
logic: "Logic",
documentation: "Documentation",
testing: "Testing",
} as const;
/**
* Reviewer prompts
*/
export const REVIEWER_PROMPTS: Record<string, string> = {
security: `You are a security reviewer. Analyze the code changes for:
- SQL injection, XSS, command injection vulnerabilities
- Authentication and authorization issues
- Sensitive data exposure (API keys, passwords, tokens)
- Input validation and sanitization problems
- Insecure dependencies
Only report findings with high confidence (≥80%). For each issue:
- Describe the vulnerability
- Explain the potential impact
- Suggest a specific fix`,
performance: `You are a performance reviewer. Analyze the code changes for:
- Algorithmic complexity issues (O(n²) or worse operations)
- Memory usage problems (leaks, excessive allocations)
- Database query efficiency (N+1 queries, missing indexes)
- Unnecessary re-renders (React) or DOM manipulations
- Missing caching opportunities
Only report findings with high confidence (≥80%). For each issue:
- Describe the performance impact
- Provide complexity analysis if applicable
- Suggest optimization`,
style: `You are a code style reviewer. Analyze the code changes for:
- Naming convention violations
- Inconsistent formatting
- Code organization issues
- Missing or unclear documentation
- Deviations from project patterns
Only report significant style issues that affect readability or maintainability.
Skip minor formatting issues that could be auto-fixed.`,
logic: `You are a logic reviewer. Analyze the code changes for:
- Edge cases not handled
- Null/undefined reference risks
- Error handling gaps
- Race conditions or concurrency issues
- Type safety violations
Only report findings with high confidence (≥80%). For each issue:
- Describe the bug or potential bug
- Explain how it could manifest
- Suggest a fix with example code`,
} as const;
/**
* Rating thresholds
*/
export const RATING_THRESHOLDS = {
5: { maxCritical: 0, maxWarning: 0 },
4: { maxCritical: 0, maxWarning: 3 },
3: { maxCritical: 0, maxWarning: 10 },
2: { maxCritical: 1, maxWarning: 20 },
1: { maxCritical: Infinity, maxWarning: Infinity },
} as const;
/**
* Recommendation thresholds
*/
export const RECOMMENDATION_THRESHOLDS = {
approve: { maxCritical: 0, maxWarning: 0, maxSuggestion: 5 },
approve_with_suggestions: { maxCritical: 0, maxWarning: 3, maxSuggestion: Infinity },
request_changes: { maxCritical: 1, maxWarning: Infinity, maxSuggestion: Infinity },
needs_discussion: { maxCritical: Infinity, maxWarning: Infinity, maxSuggestion: Infinity },
} as const;
/**
* Error messages
*/
export const PR_REVIEW_ERRORS = {
NO_DIFF: "No diff content to review",
PARSE_FAILED: (error: string) => `Failed to parse diff: ${error}`,
REVIEWER_FAILED: (reviewer: string, error: string) =>
`Reviewer ${reviewer} failed: ${error}`,
NO_FILES: "No files in diff to review",
EXCLUDED_ALL: "All files excluded by pattern",
} as const;
/**
* Status messages
*/
export const PR_REVIEW_MESSAGES = {
STARTING: "Starting PR review...",
PARSING_DIFF: "Parsing diff...",
REVIEWING: (reviewer: string) => `Running ${reviewer} review...`,
AGGREGATING: "Aggregating results...",
COMPLETED: (findings: number) => `Review complete: ${findings} finding(s)`,
NO_FINDINGS: "No issues found",
} as const;
/**
* Report titles
*/
export const PR_REVIEW_TITLES = {
REPORT: "Pull Request Review",
FINDINGS: "Findings",
SUMMARY: "Summary",
RECOMMENDATION: "Recommendation",
} as const;

132
src/constants/skills.ts Normal file
View File

@@ -0,0 +1,132 @@
/**
* Skill System Constants
*
* Constants for skill loading, matching, and execution.
*/
import { join } from "path";
import { DIRS } from "@constants/paths";
/**
* Skill file configuration
*/
export const SKILL_FILE = {
NAME: "SKILL.md",
FRONTMATTER_DELIMITER: "---",
ENCODING: "utf-8",
} as const;
/**
 * Skill directories
 *
 * BUILTIN skills ship alongside the compiled package; USER skills live
 * under the per-user config directory; PROJECT is a relative path.
 */
export const SKILL_DIRS = {
  // NOTE(review): __dirname is CommonJS-only — confirm this module is not
  // executed as ESM, where __dirname is undefined.
  BUILTIN: join(__dirname, "..", "skills"),
  USER: join(DIRS.config, "skills"),
  PROJECT: ".codetyper/skills", // relative path; no absolute base joined here
} as const;
/**
* Skill loading configuration
*/
export const SKILL_LOADING = {
CACHE_TTL_MS: 60000,
MAX_SKILLS: 100,
MAX_FILE_SIZE_BYTES: 100000,
} as const;
/**
* Skill matching configuration
*/
export const SKILL_MATCHING = {
MIN_CONFIDENCE: 0.7,
EXACT_MATCH_BONUS: 0.3,
COMMAND_PREFIX: "/",
FUZZY_THRESHOLD: 0.6,
} as const;
/**
* Default skill metadata values
*/
export const SKILL_DEFAULTS = {
VERSION: "1.0.0",
TRIGGER_TYPE: "command" as const,
AUTO_TRIGGER: false,
REQUIRED_TOOLS: [] as string[],
} as const;
/**
* Skill error messages
*/
export const SKILL_ERRORS = {
NOT_FOUND: (id: string) => `Skill not found: ${id}`,
INVALID_FRONTMATTER: (file: string) => `Invalid frontmatter in: ${file}`,
MISSING_REQUIRED_FIELD: (field: string, file: string) =>
`Missing required field '${field}' in: ${file}`,
LOAD_FAILED: (file: string, error: string) =>
`Failed to load skill from ${file}: ${error}`,
NO_MATCH: "No matching skill found for input",
EXECUTION_FAILED: (id: string, error: string) =>
`Skill execution failed for ${id}: ${error}`,
} as const;
/**
* Skill titles for UI
*/
export const SKILL_TITLES = {
LOADING: (name: string) => `Loading skill: ${name}`,
EXECUTING: (name: string) => `Executing skill: ${name}`,
MATCHED: (name: string, confidence: number) =>
`Matched skill: ${name} (${(confidence * 100).toFixed(0)}%)`,
COMPLETED: (name: string) => `Skill completed: ${name}`,
FAILED: (name: string) => `Skill failed: ${name}`,
} as const;
/**
* Built-in skill IDs
*/
export const BUILTIN_SKILLS = {
COMMIT: "commit",
REVIEW_PR: "review-pr",
EXPLAIN: "explain",
FEATURE_DEV: "feature-dev",
} as const;
/**
* Skill trigger patterns for common commands
*/
export const SKILL_TRIGGER_PATTERNS = {
COMMIT: [
"/commit",
"commit changes",
"commit this",
"git commit",
"make a commit",
],
REVIEW_PR: [
"/review-pr",
"/review",
"review pr",
"review this pr",
"review pull request",
"code review",
],
EXPLAIN: [
"/explain",
"explain this",
"explain code",
"what does this do",
"how does this work",
],
FEATURE_DEV: [
"/feature",
"/feature-dev",
"implement feature",
"new feature",
"build feature",
],
} as const;
/**
* Required fields in skill frontmatter
*/
export const SKILL_REQUIRED_FIELDS = ["id", "name", "description", "triggers"] as const;

55
src/constants/token.ts Normal file
View File

@@ -0,0 +1,55 @@
/**
* Token Counting Constants
*
* Configuration for token estimation and context management
*/
// Token estimation ratios (rough heuristic; exact tokenization varies by model)
export const CHARS_PER_TOKEN = 4;
// Derived from CHARS_PER_TOKEN so the two ratios can never drift apart.
export const TOKENS_PER_CHAR = 1 / CHARS_PER_TOKEN;
// Context warning thresholds
export const TOKEN_WARNING_THRESHOLD = 0.75; // 75% - yellow warning
export const TOKEN_CRITICAL_THRESHOLD = 0.90; // 90% - red warning
export const TOKEN_OVERFLOW_THRESHOLD = 0.95; // 95% - trigger compaction
// Pruning thresholds (following OpenCode pattern)
export const PRUNE_MINIMUM_TOKENS = 20000; // Min tokens to actually prune
export const PRUNE_PROTECT_TOKENS = 40000; // Threshold before marking for pruning
export const PRUNE_RECENT_TURNS = 2; // Protect last N user turns
// Protected tools that should never be pruned
export const PRUNE_PROTECTED_TOOLS = new Set([
"skill",
"todo_read",
"todo_write",
]);
// Default context sizes
export const DEFAULT_MAX_CONTEXT_TOKENS = 128000;
export const DEFAULT_OUTPUT_TOKENS = 16000;
// Token display formatting
export const TOKEN_DISPLAY = {
SEPARATOR: "/",
UNIT_K: "K",
FORMAT_DECIMALS: 1,
} as const;
// Token status colors (semantic keys for theme lookup)
export const TOKEN_STATUS_COLORS = {
NORMAL: "textDim",
WARNING: "warning",
CRITICAL: "error",
COMPACTING: "info",
} as const;
// Messages
export const TOKEN_MESSAGES = {
CONTEXT_LOW: "Context running low",
CONTEXT_CRITICAL: "Context nearly full",
COMPACTION_STARTING: "Starting context compaction...",
COMPACTION_COMPLETE: (saved: number) =>
`Compaction complete: ${saved.toLocaleString()} tokens freed`,
OVERFLOW_WARNING: "Context overflow detected",
} as const;

View File

@@ -49,6 +49,8 @@ export const MODE_DISPLAY_CONFIG: Record<string, ModeDisplayConfig> = {
learning_prompt: { text: "Save Learning?", color: "cyan" },
help_menu: { text: "Help", color: "cyan" },
help_detail: { text: "Help Detail", color: "cyan" },
brain_menu: { text: "Brain Settings", color: "magenta" },
brain_login: { text: "Brain Login", color: "magenta" },
} as const;
export const DEFAULT_MODE_DISPLAY: ModeDisplayConfig = {
@@ -219,6 +221,11 @@ export const SLASH_COMMANDS: SlashCommand[] = [
description: "Sign out from provider",
category: "account",
},
{
name: "brain",
description: "Configure CodeTyper Brain (memory & knowledge)",
category: "account",
},
];
export const COMMAND_CATEGORIES = [

View File

@@ -0,0 +1,75 @@
/**
* WebFetch Tool Constants
*
* Configuration for the web content fetching tool
*/
export const WEB_FETCH_DEFAULTS = {
TIMEOUT_MS: 30000,
MAX_CONTENT_LENGTH: 500000, // 500KB max
USER_AGENT:
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
} as const;
export const WEB_FETCH_TITLES = {
FETCHING: (url: string) => `Fetching: ${url}`,
SUCCESS: "Content fetched",
FAILED: "Fetch failed",
TIMEOUT: "Fetch timed out",
} as const;
export const WEB_FETCH_MESSAGES = {
URL_REQUIRED: "URL is required",
INVALID_URL: (url: string) => `Invalid URL: ${url}`,
TIMEOUT: "Request timed out",
FETCH_ERROR: (error: string) => `Fetch failed: ${error}`,
CONTENT_TOO_LARGE: "Content exceeds maximum size limit",
REDIRECT_DETECTED: (from: string, to: string) =>
`Redirected from ${from} to ${to}`,
} as const;
export const WEB_FETCH_DESCRIPTION = `Fetch content from a URL and convert HTML to markdown.
Use this tool when you need to:
- Read documentation from a URL
- Fetch API responses
- Get content from web pages
The content will be converted to markdown for readability.
HTML will be cleaned and converted. JSON responses are formatted.
Note: This tool cannot access authenticated or private URLs.
For GitHub URLs, prefer using the \`bash\` tool with \`gh\` CLI instead.`;
// Supported content types for conversion
export const SUPPORTED_CONTENT_TYPES = {
HTML: ["text/html", "application/xhtml+xml"],
JSON: ["application/json", "text/json"],
TEXT: ["text/plain", "text/markdown", "text/csv"],
XML: ["text/xml", "application/xml"],
} as const;
// HTML elements to remove (scripts, styles, etc.)
export const HTML_REMOVE_ELEMENTS = [
"script",
"style",
"noscript",
"iframe",
"svg",
"canvas",
"video",
"audio",
"nav",
"footer",
"aside",
];
// HTML elements to convert to markdown
export const HTML_BLOCK_ELEMENTS = [
"p",
"div",
"section",
"article",
"main",
"header",
];

View File

@@ -207,7 +207,7 @@ Read-only tools only:
- You are in READ-ONLY mode - you cannot modify files
- Always search before answering questions about the codebase
- If asked to make changes, explain that you're in Ask mode and suggest switching to Agent mode (Ctrl+Tab)
- If asked to make changes, explain that you're in Ask mode and suggest switching to Agent mode (Ctrl+M)
- For general programming questions, you can answer without searching`;
/**

View File

@@ -246,6 +246,7 @@ const executeStream = (
if (delta?.tool_calls) {
for (const tc of delta.tool_calls) {
addDebugLog("api", `Tool call chunk: ${JSON.stringify(tc)}`);
console.log("Debug: Tool call chunk received:", JSON.stringify(tc));
onChunk({ type: "tool_call", toolCall: tc });
}
}

View File

@@ -2,13 +2,14 @@
* Copilot token management
*/
import { readFile } from "fs/promises";
import { readFile, writeFile, mkdir } from "fs/promises";
import { existsSync } from "fs";
import { homedir, platform } from "os";
import { join } from "path";
import got from "got";
import { COPILOT_AUTH_URL } from "@constants/copilot";
import { FILES, DIRS } from "@constants/paths";
import {
getState,
setOAuthToken,
@@ -16,6 +17,36 @@ import {
} from "@providers/copilot/state";
import type { CopilotToken } from "@/types/copilot";
/**
 * Read the Copilot token cached on disk, if present and still valid.
 *
 * Returns null when the cache file is missing/corrupt, or when the token
 * expires within the next 60 seconds (buffer so we never hand out a
 * token that dies mid-request).
 */
const loadCachedToken = async (): Promise<CopilotToken | null> => {
  let token: CopilotToken;
  try {
    const raw = await readFile(FILES.copilotTokenCache, "utf-8");
    token = JSON.parse(raw) as CopilotToken;
  } catch {
    // Cache absent or unreadable — behave as if there is no cached token.
    return null;
  }
  const nowSeconds = Date.now() / 1000;
  return token.expires_at > nowSeconds + 60 ? token : null;
};
/**
 * Persist a Copilot token to the on-disk cache.
 *
 * Failures are swallowed deliberately: caching is a best-effort
 * optimization and must never break token refresh.
 */
const saveCachedToken = async (token: CopilotToken): Promise<void> => {
  try {
    await mkdir(DIRS.cache, { recursive: true });
    const serialized = JSON.stringify(token);
    await writeFile(FILES.copilotTokenCache, serialized, "utf-8");
  } catch {
    // Best-effort: ignore cache write errors.
  }
};
const getConfigDir = (): string => {
const home = homedir();
const os = platform();
@@ -88,6 +119,7 @@ export const refreshToken = async (): Promise<CopilotToken> => {
const currentState = getState();
// Check in-memory cache first
if (
currentState.githubToken &&
currentState.githubToken.expires_at > Date.now() / 1000
@@ -95,6 +127,14 @@ export const refreshToken = async (): Promise<CopilotToken> => {
return currentState.githubToken;
}
// Check disk cache to avoid network request on startup
const cachedToken = await loadCachedToken();
if (cachedToken) {
setGitHubToken(cachedToken);
return cachedToken;
}
// Fetch new token from GitHub
const response = await got
.get(COPILOT_AUTH_URL, {
headers: {
@@ -109,6 +149,10 @@ export const refreshToken = async (): Promise<CopilotToken> => {
}
setGitHubToken(response);
// Cache to disk for faster startup next time
saveCachedToken(response).catch(() => {});
return response;
};

View File

@@ -22,15 +22,36 @@ import type {
OllamaChatResponse,
OllamaToolCall,
OllamaToolDefinition,
OllamaMessage,
} from "@/types/ollama";
const formatMessages = (
messages: Message[],
): Array<{ role: string; content: string }> =>
messages.map((msg) => ({
role: msg.role,
content: msg.content,
}));
/**
 * Parse tool-call arguments into an object for the Ollama API.
 *
 * Providers carry arguments as a JSON string, but Ollama expects a
 * structured object. Malformed JSON (models occasionally emit it) falls
 * back to an empty object instead of throwing, so one bad tool call
 * cannot abort the entire chat request with an exception.
 */
const parseToolArguments = (
  args: string | Record<string, unknown>,
): Record<string, unknown> => {
  if (typeof args !== "string") {
    return args;
  }
  try {
    return JSON.parse(args) as Record<string, unknown>;
  } catch {
    return {};
  }
};
/**
 * Format messages for the Ollama API.
 *
 * Handles regular messages, assistant messages with tool_calls, and tool
 * response messages. tool_calls are attached only when present and
 * non-empty, with their arguments normalized to objects (see above).
 */
const formatMessages = (messages: Message[]): OllamaMessage[] =>
  messages.map((msg) => {
    const formatted: OllamaMessage = {
      role: msg.role,
      content: msg.content,
    };

    // Include tool_calls for assistant messages that made tool calls
    if (msg.tool_calls && msg.tool_calls.length > 0) {
      formatted.tool_calls = msg.tool_calls.map((tc) => ({
        id: tc.id,
        function: {
          name: tc.function.name,
          arguments: parseToolArguments(tc.function.arguments),
        },
      }));
    }

    return formatted;
  });
const formatTools = (
tools: ChatCompletionOptions["tools"],

View File

@@ -0,0 +1,289 @@
/**
* Agent definition loader service
* Loads agent definitions from markdown files with YAML frontmatter
*/
import { readFile, readdir } from "node:fs/promises";
import { join, basename, extname } from "node:path";
import { existsSync } from "node:fs";
import { homedir } from "node:os";
import type {
AgentDefinition,
AgentFrontmatter,
AgentDefinitionFile,
AgentRegistry,
AgentLoadResult,
AgentTier,
AgentColor,
} from "@src/types/agent-definition";
import { DEFAULT_AGENT_DEFINITION, AGENT_DEFINITION_SCHEMA } from "@src/types/agent-definition";
import { AGENT_DEFINITION, AGENT_DEFINITION_PATHS, AGENT_MESSAGES } from "@src/constants/agent-definition";
/**
 * Split a markdown document into YAML frontmatter and body.
 *
 * Expects the document to start with the frontmatter delimiter line,
 * followed by frontmatter lines, a closing delimiter, then the body.
 * Returns null when no well-formed frontmatter block is present.
 *
 * The frontmatter is parsed by a deliberately small YAML-subset parser:
 * scalar `key: value` pairs, inline arrays `key: [a, b]`, and block
 * arrays (`key:` followed by `- item` lines). Scalars that look like
 * booleans or numbers are coerced; surrounding quotes are stripped.
 * Nested mappings are NOT supported.
 */
const parseFrontmatter = (content: string): { frontmatter: Record<string, unknown>; body: string } | null => {
  const delimiter = AGENT_DEFINITION.FRONTMATTER_DELIMITER;
  const lines = content.split("\n");
  // Frontmatter must begin on the very first line of the document.
  if (lines[0]?.trim() !== delimiter) {
    return null;
  }
  // Locate the closing delimiter (search starts after the opening one).
  const endIndex = lines.findIndex((line, index) => index > 0 && line.trim() === delimiter);
  if (endIndex === -1) {
    return null;
  }
  const frontmatterLines = lines.slice(1, endIndex);
  const body = lines.slice(endIndex + 1).join("\n").trim();
  // Simple YAML parser for frontmatter
  const frontmatter: Record<string, unknown> = {};
  // Block-array state: after a key with an empty value, subsequent
  // "- item" lines accumulate into currentArray under currentKey.
  let currentKey = "";
  let currentArray: string[] | null = null;
  frontmatterLines.forEach((line) => {
    const trimmed = line.trim();
    if (trimmed.startsWith("- ") && currentArray !== null) {
      currentArray.push(trimmed.slice(2));
      return;
    }
    // Any non-item line terminates an in-progress block array.
    if (currentArray !== null) {
      frontmatter[currentKey] = currentArray;
      currentArray = null;
    }
    const colonIndex = trimmed.indexOf(":");
    if (colonIndex === -1) return; // not a "key: value" line — silently skipped
    const key = trimmed.slice(0, colonIndex).trim();
    const value = trimmed.slice(colonIndex + 1).trim();
    if (value === "") {
      // Empty value: treat as the start of a block array.
      currentKey = key;
      currentArray = [];
    } else if (value.startsWith("[") && value.endsWith("]")) {
      // Inline array: comma-split; NOTE commas inside quoted items are
      // not handled by this subset parser.
      frontmatter[key] = value
        .slice(1, -1)
        .split(",")
        .map((s) => s.trim().replace(/^["']|["']$/g, ""));
    } else if (value === "true") {
      frontmatter[key] = true;
    } else if (value === "false") {
      frontmatter[key] = false;
    } else if (!isNaN(Number(value))) {
      // Numeric-looking scalars become numbers.
      frontmatter[key] = Number(value);
    } else {
      // Plain string: strip a leading and/or trailing quote character.
      frontmatter[key] = value.replace(/^["']|["']$/g, "");
    }
  });
  // Flush a block array that ran to the end of the frontmatter.
  if (currentArray !== null) {
    frontmatter[currentKey] = currentArray;
  }
  return { frontmatter, body };
};
/**
 * Validate a raw frontmatter map against the agent schema.
 * Returns a typed AgentFrontmatter, or null when a required field is
 * missing or has the wrong shape. Optional fields fall back to
 * DEFAULT_AGENT_DEFINITION values.
 */
const validateFrontmatter = (frontmatter: Record<string, unknown>): AgentFrontmatter | null => {
  const hasAllRequired = AGENT_DEFINITION_SCHEMA.required.every(
    (field) => field in frontmatter
  );
  if (!hasAllRequired) {
    return null;
  }
  const { name, description, tools } = frontmatter;
  if (typeof name !== "string" || typeof description !== "string" || !Array.isArray(tools)) {
    return null;
  }
  return {
    name,
    description,
    tools: tools as ReadonlyArray<string>,
    tier: (frontmatter.tier as AgentTier) || DEFAULT_AGENT_DEFINITION.tier,
    color: (frontmatter.color as AgentColor) || DEFAULT_AGENT_DEFINITION.color,
    maxTurns: (frontmatter.maxTurns as number) || DEFAULT_AGENT_DEFINITION.maxTurns,
    triggerPhrases: (frontmatter.triggerPhrases as ReadonlyArray<string>) || [],
    capabilities: (frontmatter.capabilities as ReadonlyArray<string>) || [],
    allowedPaths: frontmatter.allowedPaths as ReadonlyArray<string> | undefined,
    deniedPaths: frontmatter.deniedPaths as ReadonlyArray<string> | undefined,
  };
};
/**
 * Convert validated frontmatter plus the markdown body into a complete
 * AgentDefinition, filling unset optional fields from the defaults.
 */
const frontmatterToDefinition = (frontmatter: AgentFrontmatter, content: string): AgentDefinition => {
  const { name, description, tools, allowedPaths, deniedPaths } = frontmatter;
  return {
    name,
    description,
    tools,
    tier: frontmatter.tier || (DEFAULT_AGENT_DEFINITION.tier as AgentTier),
    color: frontmatter.color || (DEFAULT_AGENT_DEFINITION.color as AgentColor),
    maxTurns: frontmatter.maxTurns || DEFAULT_AGENT_DEFINITION.maxTurns,
    // An empty body means "no custom system prompt".
    systemPrompt: content || undefined,
    triggerPhrases: frontmatter.triggerPhrases || [],
    capabilities: frontmatter.capabilities || [],
    permissions: { allowedPaths, deniedPaths },
  };
};
/**
 * Load and parse a single agent definition markdown file.
 * Never throws: read, parse, and validation failures are all reported
 * through the returned AgentLoadResult.
 */
export const loadAgentDefinitionFile = async (filePath: string): Promise<AgentLoadResult> => {
  let content: string;
  try {
    content = await readFile(filePath, "utf-8");
  } catch (error) {
    const message = error instanceof Error ? error.message : "Unknown error";
    return { success: false, error: message, filePath };
  }
  const parsed = parseFrontmatter(content);
  if (parsed === null) {
    return { success: false, error: AGENT_MESSAGES.INVALID_FRONTMATTER, filePath };
  }
  const frontmatter = validateFrontmatter(parsed.frontmatter);
  if (frontmatter === null) {
    return { success: false, error: AGENT_MESSAGES.MISSING_REQUIRED, filePath };
  }
  return {
    success: true,
    agent: frontmatterToDefinition(frontmatter, parsed.body),
    filePath,
  };
};
/**
 * Load every agent definition file (matching AGENT_DEFINITION.FILE_EXTENSION)
 * from a directory. A leading "~" in the path is expanded to the user's
 * home directory. Returns an empty list when the directory is missing or
 * unreadable.
 */
export const loadAgentDefinitionsFromDirectory = async (
  directoryPath: string
): Promise<ReadonlyArray<AgentLoadResult>> => {
  // Only expand a LEADING "~"; the old `.replace("~", …)` corrupted paths
  // containing a literal tilde elsewhere (e.g. "/data/~backup").
  const resolvedPath = directoryPath.startsWith("~")
    ? join(homedir(), directoryPath.slice(1))
    : directoryPath;
  if (!existsSync(resolvedPath)) {
    return [];
  }
  try {
    const files = await readdir(resolvedPath);
    const mdFiles = files.filter(
      (file) => extname(file) === AGENT_DEFINITION.FILE_EXTENSION
    );
    return await Promise.all(
      mdFiles.map((file) => loadAgentDefinitionFile(join(resolvedPath, file)))
    );
  } catch {
    return [];
  }
};
/**
 * Build the agent registry by loading definitions from the project and
 * global directories. Earlier paths win on name collisions (project takes
 * precedence over global). Also builds the trigger-phrase and capability
 * indexes.
 */
export const loadAllAgentDefinitions = async (
  projectPath: string
): Promise<AgentRegistry> => {
  const agents = new Map<string, AgentDefinition>();
  const byTrigger = new Map<string, string>();
  const byCapability = new Map<string, string[]>();
  // Register an agent and index its trigger phrases and capabilities.
  const indexAgent = (agent: AgentDefinition): void => {
    agents.set(agent.name, agent);
    for (const phrase of agent.triggerPhrases ?? []) {
      byTrigger.set(phrase.toLowerCase(), agent.name);
    }
    for (const capability of agent.capabilities ?? []) {
      const existing = byCapability.get(capability) ?? [];
      byCapability.set(capability, [...existing, agent.name]);
    }
  };
  // Load from all paths in priority order (project > global > builtin)
  const searchPaths = [
    join(projectPath, AGENT_DEFINITION_PATHS.PROJECT),
    AGENT_DEFINITION_PATHS.GLOBAL,
  ];
  for (const searchPath of searchPaths) {
    const results = await loadAgentDefinitionsFromDirectory(searchPath);
    for (const result of results) {
      // Skip failures and names loaded earlier (first load wins).
      if (result.success && result.agent && !agents.has(result.agent.name)) {
        indexAgent(result.agent);
      }
    }
  }
  return { agents, byTrigger, byCapability };
};
/**
 * Find the first agent whose registered trigger phrase appears in the
 * given text (case-insensitive substring match, registry iteration order).
 */
export const findAgentByTrigger = (
  registry: AgentRegistry,
  text: string
): AgentDefinition | undefined => {
  const haystack = text.toLowerCase();
  const hit = [...registry.byTrigger.entries()].find(([phrase]) =>
    haystack.includes(phrase)
  );
  return hit === undefined ? undefined : registry.agents.get(hit[1]);
};
/**
 * List every agent registered under the given capability, skipping names
 * that no longer resolve to a definition.
 */
export const findAgentsByCapability = (
  registry: AgentRegistry,
  capability: string
): ReadonlyArray<AgentDefinition> => {
  const names = registry.byCapability.get(capability) ?? [];
  const found: AgentDefinition[] = [];
  for (const name of names) {
    const agent = registry.agents.get(name);
    if (agent !== undefined) {
      found.push(agent);
    }
  }
  return found;
};
/** Look up a single agent definition by its exact name. */
export const getAgentByName = (
  registry: AgentRegistry,
  name: string
): AgentDefinition | undefined => registry.agents.get(name);
/** Return all registered agent definitions in registration order. */
export const listAllAgents = (registry: AgentRegistry): ReadonlyArray<AgentDefinition> =>
  Array.from(registry.agents.values());
/**
 * Serialize an AgentDefinition back into markdown with YAML frontmatter
 * (the inverse of the loader above). Optional fields are emitted only
 * when present/non-empty.
 */
export const createAgentDefinitionContent = (agent: AgentDefinition): string => {
  const lines: string[] = [
    "---",
    `name: ${agent.name}`,
    `description: ${agent.description}`,
    `tools: [${agent.tools.join(", ")}]`,
    `tier: ${agent.tier}`,
    `color: ${agent.color}`,
  ];
  if (agent.maxTurns) {
    lines.push(`maxTurns: ${agent.maxTurns}`);
  }
  // Emit a block list only when it has at least one entry.
  const pushList = (key: string, values: ReadonlyArray<string> | undefined): void => {
    if (values && values.length > 0) {
      lines.push(`${key}:`);
      for (const value of values) {
        lines.push(`  - ${value}`);
      }
    }
  };
  pushList("triggerPhrases", agent.triggerPhrases);
  pushList("capabilities", agent.capabilities);
  lines.push("---");
  const bodyText = agent.systemPrompt || `# ${agent.name}\n\n${agent.description}`;
  return `${lines.join("\n")}\n\n${bodyText}`;
};

View File

@@ -0,0 +1,389 @@
/**
* Background task service
* Manages background task execution, queue, and lifecycle
*/
import { randomUUID } from "node:crypto";
import { writeFile, readFile, mkdir, readdir, unlink } from "node:fs/promises";
import { join } from "node:path";
import { existsSync } from "node:fs";
import { homedir } from "node:os";
import type {
BackgroundTask,
BackgroundTaskStatus,
BackgroundTaskPriority,
BackgroundTaskConfig,
TaskProgress,
TaskResult,
TaskError,
TaskMetadata,
TaskNotification,
TaskStep,
TaskArtifact,
} from "@src/types/background-task";
import { DEFAULT_BACKGROUND_TASK_CONFIG, BACKGROUND_TASK_PRIORITIES } from "@src/types/background-task";
import {
BACKGROUND_TASK,
BACKGROUND_TASK_STORAGE,
BACKGROUND_TASK_MESSAGES,
BACKGROUND_TASK_STATUS_ICONS,
} from "@src/constants/background-task";
// Handler that performs a task's work; receives a callback for progress updates.
type TaskHandler = (task: BackgroundTask, updateProgress: (progress: Partial<TaskProgress>) => void) => Promise<TaskResult>;
// Subscriber invoked for every task lifecycle notification.
type NotificationHandler = (notification: TaskNotification) => void;
// Module-scoped mutable state for the task service (singleton by module scope).
interface BackgroundTaskState {
  tasks: Map<string, BackgroundTask>; // all known tasks keyed by id
  queue: string[]; // ids of pending tasks awaiting execution
  running: string[]; // ids of currently executing tasks
  handlers: Map<string, TaskHandler>; // task name -> registered handler
  notificationHandlers: NotificationHandler[]; // lifecycle subscribers
  config: BackgroundTaskConfig;
}
const state: BackgroundTaskState = {
  tasks: new Map(),
  queue: [],
  running: [],
  handlers: new Map(),
  notificationHandlers: [],
  config: DEFAULT_BACKGROUND_TASK_CONFIG,
};
/** Absolute path of the on-disk task store (~/.local/share/codetyper/tasks). */
const getStoragePath = (): string =>
  join(homedir(), ".local", "share", "codetyper", "tasks");
const ensureStorageDirectory = async (): Promise<void> => {
const storagePath = getStoragePath();
if (!existsSync(storagePath)) {
await mkdir(storagePath, { recursive: true });
}
};
/** Write a task's JSON snapshot to disk (no-op when persistence is off). */
const persistTask = async (task: BackgroundTask): Promise<void> => {
  if (!state.config.persistTasks) {
    return;
  }
  await ensureStorageDirectory();
  const fileName = `${task.id}${BACKGROUND_TASK_STORAGE.FILE_EXTENSION}`;
  await writeFile(join(getStoragePath(), fileName), JSON.stringify(task, null, 2));
};
/**
 * Delete a task's persisted JSON file.
 * Uses unlink-and-ignore-ENOENT instead of an existsSync pre-check to
 * avoid a check-then-act race with concurrent deletions.
 */
const removePersistedTask = async (taskId: string): Promise<void> => {
  const filePath = join(getStoragePath(), `${taskId}${BACKGROUND_TASK_STORAGE.FILE_EXTENSION}`);
  try {
    await unlink(filePath);
  } catch (error) {
    // Only tolerate "file does not exist"; surface anything else.
    if ((error as NodeJS.ErrnoException).code !== "ENOENT") {
      throw error;
    }
  }
};
/**
 * Restore tasks persisted by a previous run. Tasks that were pending or
 * mid-flight when the process died are re-queued as pending; finished
 * tasks are loaded for history only. Corrupted files are skipped.
 */
const loadPersistedTasks = async (): Promise<void> => {
  const storagePath = getStoragePath();
  if (!existsSync(storagePath)) return;
  const files = await readdir(storagePath);
  for (const file of files) {
    if (!file.endsWith(BACKGROUND_TASK_STORAGE.FILE_EXTENSION)) continue;
    try {
      const raw = await readFile(join(storagePath, file), "utf-8");
      const task = JSON.parse(raw) as BackgroundTask;
      const wasInterrupted = task.status === "pending" || task.status === "running";
      if (wasInterrupted) {
        // Re-queue interrupted work as pending.
        state.tasks.set(task.id, { ...task, status: "pending" });
        state.queue.push(task.id);
      } else {
        state.tasks.set(task.id, task);
      }
    } catch {
      // Skip corrupted task files
    }
  }
};
/** Broadcast a lifecycle notification to every registered subscriber. */
const notify = (taskId: string, type: TaskNotification["type"], message: string): void => {
  const notification: TaskNotification = {
    taskId,
    type,
    message,
    timestamp: Date.now(),
  };
  for (const handler of state.notificationHandlers) {
    handler(notification);
  }
};
/** Fresh zeroed progress record for a newly created task. */
const createInitialProgress = (): TaskProgress => {
  return {
    current: 0,
    total: 100,
    percentage: 0,
    message: "Starting...",
    steps: [],
  };
};
/**
 * Drain the queue, starting tasks (highest priority first) until the
 * concurrency limit is reached.
 *
 * Tasks are started WITHOUT awaiting completion: the previous
 * `await executeTask(task)` serialized all work, so maxConcurrent could
 * never be reached. executeTask pushes itself onto state.running
 * synchronously (before its first await), so the loop condition stays
 * accurate, and each finished task re-invokes processQueue() from its
 * finally block to pick up remaining work.
 */
const processQueue = async (): Promise<void> => {
  while (
    state.queue.length > 0 &&
    state.running.length < state.config.maxConcurrent
  ) {
    // Sort by priority
    state.queue.sort((a, b) => {
      const taskA = state.tasks.get(a);
      const taskB = state.tasks.get(b);
      if (!taskA || !taskB) return 0;
      return BACKGROUND_TASK_PRIORITIES[taskB.priority] - BACKGROUND_TASK_PRIORITIES[taskA.priority];
    });
    const taskId = state.queue.shift();
    if (!taskId) continue;
    const task = state.tasks.get(taskId);
    if (!task) continue;
    // Fire and forget to allow up to maxConcurrent parallel tasks.
    void executeTask(task);
  }
};
/**
 * Run a single task: mark it running, invoke its registered handler with
 * a progress callback, and record completion or failure. Always removes
 * the task from the running set and re-drains the queue.
 */
const executeTask = async (task: BackgroundTask): Promise<void> => {
  const handler = state.handlers.get(task.name);
  if (!handler) {
    await updateTaskStatus(task.id, "failed", {
      code: "HANDLER_NOT_FOUND",
      message: `No handler registered for task: ${task.name}`,
      recoverable: false,
    });
    return;
  }
  // NOTE: this push happens before the first await so processQueue sees
  // an accurate running count immediately after calling executeTask.
  state.running.push(task.id);
  const updatedTask: BackgroundTask = {
    ...task,
    status: "running",
    startedAt: Date.now(),
  };
  state.tasks.set(task.id, updatedTask);
  await persistTask(updatedTask);
  notify(task.id, "started", BACKGROUND_TASK_MESSAGES.STARTED);
  // Progress callback handed to the handler; merges partial updates and
  // recomputes the percentage only when both current and total are given.
  const updateProgress = (partial: Partial<TaskProgress>): void => {
    const currentTask = state.tasks.get(task.id);
    if (!currentTask) return;
    const newProgress: TaskProgress = {
      ...currentTask.progress,
      ...partial,
      percentage: partial.current !== undefined && partial.total !== undefined
        ? Math.round((partial.current / partial.total) * 100)
        : currentTask.progress.percentage,
    };
    state.tasks.set(task.id, { ...currentTask, progress: newProgress });
    notify(task.id, "progress", newProgress.message);
  };
  // Track the timeout timer so it can be cancelled. Previously the timer
  // was never cleared: after a successful task it stayed alive and its
  // later rejection was unhandled.
  let timeoutTimer: ReturnType<typeof setTimeout> | undefined;
  try {
    const result = await Promise.race([
      handler(updatedTask, updateProgress),
      new Promise<never>((_, reject) => {
        timeoutTimer = setTimeout(
          () => reject(new Error("Task timeout")),
          state.config.defaultTimeout,
        );
      }),
    ]);
    await completeTask(task.id, result);
  } catch (error) {
    const taskError: TaskError = {
      code: "EXECUTION_ERROR",
      message: error instanceof Error ? error.message : "Unknown error",
      stack: error instanceof Error ? error.stack : undefined,
      recoverable: true,
    };
    await updateTaskStatus(task.id, "failed", taskError);
  } finally {
    if (timeoutTimer !== undefined) {
      clearTimeout(timeoutTimer);
    }
    state.running = state.running.filter((id) => id !== task.id);
    processQueue();
  }
};
const completeTask = async (taskId: string, result: TaskResult): Promise<void> => {
const task = state.tasks.get(taskId);
if (!task) return;
const completedTask: BackgroundTask = {
...task,
status: "completed",
completedAt: Date.now(),
result,
progress: {
...task.progress,
current: task.progress.total,
percentage: 100,
message: "Completed",
},
};
state.tasks.set(taskId, completedTask);
await persistTask(completedTask);
notify(taskId, "completed", BACKGROUND_TASK_MESSAGES.COMPLETED);
};
/**
 * Transition a task to a new status (optionally recording an error),
 * stamping completedAt for terminal states and emitting a "failed"
 * notification when appropriate.
 */
const updateTaskStatus = async (
  taskId: string,
  status: BackgroundTaskStatus,
  error?: TaskError
): Promise<void> => {
  const task = state.tasks.get(taskId);
  if (!task) return;
  const isTerminal = ["completed", "failed", "cancelled"].includes(status);
  const updatedTask: BackgroundTask = {
    ...task,
    status,
    error,
    completedAt: isTerminal ? Date.now() : undefined,
  };
  state.tasks.set(taskId, updatedTask);
  await persistTask(updatedTask);
  if (status === "failed") {
    notify(taskId, "failed", error?.message || BACKGROUND_TASK_MESSAGES.FAILED);
  }
};
// Public API
/**
 * Initialize the service: merge the supplied config over the defaults,
 * restore persisted tasks from disk, then start draining the queue.
 */
export const initialize = async (config?: Partial<BackgroundTaskConfig>): Promise<void> => {
  state.config = { ...DEFAULT_BACKGROUND_TASK_CONFIG, ...config };
  await loadPersistedTasks();
  processQueue();
};
/** Register the handler that executes tasks created with the given name. */
export const registerHandler = (name: string, handler: TaskHandler): void => {
  state.handlers.set(name, handler);
};
/** Subscribe to task notifications; returns an unsubscribe function. */
export const onNotification = (handler: NotificationHandler): () => void => {
  state.notificationHandlers.push(handler);
  return (): void => {
    state.notificationHandlers = state.notificationHandlers.filter((h) => h !== handler);
  };
};
/**
 * Create and enqueue a new background task. The task is persisted
 * immediately and the queue is drained, so it may start right away.
 */
export const createTask = async (
  name: string,
  description: string,
  metadata: TaskMetadata,
  priority: BackgroundTaskPriority = "normal"
): Promise<BackgroundTask> => {
  const task: BackgroundTask = {
    id: randomUUID(),
    name,
    description,
    status: "pending",
    priority,
    createdAt: Date.now(),
    progress: createInitialProgress(),
    metadata,
  };
  state.tasks.set(task.id, task);
  state.queue.push(task.id);
  await persistTask(task);
  processQueue();
  return task;
};
/**
 * Cancel a pending or running task. Returns false when the task does not
 * exist or is already in a terminal/paused state.
 */
export const cancelTask = async (taskId: string): Promise<boolean> => {
  const task = state.tasks.get(taskId);
  if (!task) return false;
  switch (task.status) {
    case "running": {
      await updateTaskStatus(taskId, "cancelled");
      state.running = state.running.filter((id) => id !== taskId);
      notify(taskId, "failed", BACKGROUND_TASK_MESSAGES.CANCELLED);
      return true;
    }
    case "pending": {
      state.queue = state.queue.filter((id) => id !== taskId);
      await updateTaskStatus(taskId, "cancelled");
      return true;
    }
    default:
      return false;
  }
};
/** Pause a running task; returns false unless the task is currently running. */
export const pauseTask = async (taskId: string): Promise<boolean> => {
  const task = state.tasks.get(taskId);
  if (task?.status !== "running") return false;
  await updateTaskStatus(taskId, "paused");
  state.running = state.running.filter((id) => id !== taskId);
  notify(taskId, "progress", BACKGROUND_TASK_MESSAGES.PAUSED);
  return true;
};
/** Resume a paused task by putting it at the front of the queue. */
export const resumeTask = async (taskId: string): Promise<boolean> => {
  const task = state.tasks.get(taskId);
  if (task?.status !== "paused") return false;
  state.queue.unshift(taskId);
  await updateTaskStatus(taskId, "pending");
  notify(taskId, "progress", BACKGROUND_TASK_MESSAGES.RESUMED);
  processQueue();
  return true;
};
/** Look up a task by id (undefined when unknown). */
export const getTask = (taskId: string): BackgroundTask | undefined =>
  state.tasks.get(taskId);
/** List tasks, optionally filtered by status, newest first. */
export const listTasks = (filter?: { status?: BackgroundTaskStatus }): ReadonlyArray<BackgroundTask> => {
  const wanted = filter?.status;
  const selected = [...state.tasks.values()].filter(
    (t) => wanted === undefined || t.status === wanted
  );
  return selected.sort((a, b) => b.createdAt - a.createdAt);
};
/**
 * Remove all terminal (completed/failed/cancelled) tasks from memory and
 * disk; returns how many were removed.
 */
export const clearCompletedTasks = async (): Promise<number> => {
  const terminalStatuses = ["completed", "failed", "cancelled"];
  const terminal = [...state.tasks.values()].filter((t) =>
    terminalStatuses.includes(t.status)
  );
  for (const task of terminal) {
    state.tasks.delete(task.id);
    await removePersistedTask(task.id);
  }
  return terminal.length;
};
/** Map a task status to its display icon. */
export const getTaskStatusIcon = (status: BackgroundTaskStatus): string =>
  BACKGROUND_TASK_STATUS_ICONS[status];
/** One-line human-readable summary: icon, name, progress %, description. */
export const formatTaskSummary = (task: BackgroundTask): string => {
  const icon = getTaskStatusIcon(task.status);
  // Percentage is shown only while the task is actively running.
  const progress = task.status === "running" ? ` (${task.progress.percentage}%)` : "";
  return `${icon} ${task.name}${progress} - ${task.description}`;
};
/** Number of tasks waiting to start. */
export const getQueueLength = (): number => state.queue.length;
/** Number of tasks currently executing. */
export const getRunningCount = (): number => state.running.length;

688
src/services/brain.ts Normal file
View File

@@ -0,0 +1,688 @@
/**
* Brain Service
*
* Business logic layer for the CodeTyper Brain integration.
* Provides context injection, knowledge recall, and learning capabilities.
*/
import fs from "fs/promises";
import { DIRS, FILES } from "@constants/paths";
import { BRAIN_DEFAULTS, BRAIN_ERRORS, BRAIN_DISABLED } from "@constants/brain";
import * as brainApi from "@api/brain";
import type {
BrainCredentials,
BrainState,
BrainConnectionStatus,
BrainUser,
BrainConcept,
BrainRecallResponse,
BrainExtractResponse,
} from "@/types/brain";
// ============================================================================
// State Management (Singleton via Closure)
// ============================================================================
// Shape of the persisted vars file entries holding Brain auth material.
interface VarsFile {
  brainApiKey?: string;
  brainJwtToken?: string;
}
// Current Brain connection state (module-scoped singleton).
let brainState: BrainState = {
  status: "disconnected",
  user: null,
  projectId: BRAIN_DEFAULTS.PROJECT_ID,
  knowledgeCount: 0,
  memoryCount: 0,
  lastError: null,
};
// In-memory caches so credential files are not re-read on every call.
let cachedCredentials: BrainCredentials | null = null;
let cachedVars: VarsFile | null = null;
// ============================================================================
// Vars File Management
// ============================================================================
/**
 * Load the vars file from disk, using the in-memory cache when available.
 * A missing or unreadable file yields an empty object (and is not cached).
 */
const loadVarsFile = async (): Promise<VarsFile> => {
  if (cachedVars !== null) {
    return cachedVars;
  }
  try {
    const raw = await fs.readFile(FILES.vars, "utf-8");
    cachedVars = JSON.parse(raw) as VarsFile;
    return cachedVars;
  } catch {
    return {};
  }
};
/**
 * Persist the vars file to disk (creating the config directory first)
 * and refresh the cache. Throws with context on any I/O failure.
 */
const saveVarsFile = async (vars: VarsFile): Promise<void> => {
  try {
    await fs.mkdir(DIRS.config, { recursive: true });
    await fs.writeFile(FILES.vars, JSON.stringify(vars, null, 2), "utf-8");
  } catch (error) {
    throw new Error(`Failed to save vars file: ${error}`);
  }
  cachedVars = vars;
};
// ============================================================================
// Credentials Management
// ============================================================================
/** Path of the JSON file holding persisted Brain auth tokens. */
const getCredentialsPath = (): string => `${DIRS.data}/brain-credentials.json`;
/**
 * Load brain credentials from disk, preferring the in-memory cache.
 * Returns null when no credentials file exists or it cannot be parsed.
 */
export const loadCredentials = async (): Promise<BrainCredentials | null> => {
  if (cachedCredentials !== null) {
    return cachedCredentials;
  }
  try {
    const raw = await fs.readFile(getCredentialsPath(), "utf-8");
    cachedCredentials = JSON.parse(raw) as BrainCredentials;
  } catch {
    return null;
  }
  return cachedCredentials;
};
/**
 * Persist brain credentials to disk (creating the data directory if
 * needed) and refresh the cache. Throws with context on I/O failure.
 */
export const saveCredentials = async (
  credentials: BrainCredentials,
): Promise<void> => {
  try {
    await fs.mkdir(DIRS.data, { recursive: true });
    const serialized = JSON.stringify(credentials, null, 2);
    await fs.writeFile(getCredentialsPath(), serialized, "utf-8");
    cachedCredentials = credentials;
  } catch (error) {
    throw new Error(`Failed to save brain credentials: ${error}`);
  }
};
/**
 * Clear brain credentials from disk and memory.
 *
 * The in-memory cache is reset unconditionally: previously it was only
 * nulled when unlink succeeded, so a missing credentials file left stale
 * credentials cached. Vars-file entries are blanked best-effort.
 */
export const clearCredentials = async (): Promise<void> => {
  // Always drop the cache, even if the file is already gone.
  cachedCredentials = null;
  try {
    await fs.unlink(getCredentialsPath());
  } catch {
    // File may not exist, ignore
  }
  // Also clear vars file entries
  try {
    const vars = await loadVarsFile();
    await saveVarsFile({
      ...vars,
      brainApiKey: undefined,
      brainJwtToken: undefined,
    });
  } catch {
    // Ignore errors
  }
};
/**
 * Get the Brain API key, preferring the CODETYPER_BRAIN_API_KEY
 * environment variable over the persisted vars file.
 */
export const getApiKey = async (): Promise<string | undefined> => {
  // First check environment variable
  const envKey = process.env.CODETYPER_BRAIN_API_KEY;
  if (envKey) {
    return envKey;
  }
  // Then check vars file
  const vars = await loadVarsFile();
  return vars.brainApiKey;
};
/**
 * Get the persisted JWT token from the vars file, if any.
 */
export const getJwtToken = async (): Promise<string | undefined> => {
  const vars = await loadVarsFile();
  return vars.brainJwtToken;
};
/**
 * Persist the API key into the vars file (merging with existing entries).
 */
export const setApiKey = async (apiKey: string): Promise<void> => {
  const vars = await loadVarsFile();
  await saveVarsFile({ ...vars, brainApiKey: apiKey });
};
/**
 * Persist the JWT token into the vars file (merging with existing entries).
 */
export const setJwtToken = async (jwtToken: string): Promise<void> => {
  const vars = await loadVarsFile();
  await saveVarsFile({ ...vars, brainJwtToken: jwtToken });
};
// ============================================================================
// Authentication
// ============================================================================
/**
* Login to Brain service
*/
export const login = async (
email: string,
password: string,
): Promise<{ success: boolean; user?: BrainUser; error?: string }> => {
try {
updateState({ status: "connecting" });
const response = await brainApi.login(email, password);
if (response.success && response.data) {
const credentials: BrainCredentials = {
accessToken: response.data.access_token,
refreshToken: response.data.refresh_token,
expiresAt: response.data.expires_at,
user: response.data.user,
};
await saveCredentials(credentials);
updateState({
status: "connected",
user: response.data.user,
lastError: null,
});
return { success: true, user: response.data.user };
}
updateState({ status: "error", lastError: "Login failed" });
return { success: false, error: "Login failed" };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
updateState({ status: "error", lastError: errorMessage });
return { success: false, error: errorMessage };
}
};
/**
* Register a new account
*/
export const register = async (
email: string,
password: string,
displayName: string,
): Promise<{ success: boolean; user?: BrainUser; error?: string }> => {
try {
updateState({ status: "connecting" });
const response = await brainApi.register(email, password, displayName);
if (response.success && response.data) {
const credentials: BrainCredentials = {
accessToken: response.data.access_token,
refreshToken: response.data.refresh_token,
expiresAt: response.data.expires_at,
user: response.data.user,
};
await saveCredentials(credentials);
updateState({
status: "connected",
user: response.data.user,
lastError: null,
});
return { success: true, user: response.data.user };
}
updateState({ status: "error", lastError: "Registration failed" });
return { success: false, error: "Registration failed" };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
updateState({ status: "error", lastError: errorMessage });
return { success: false, error: errorMessage };
}
};
/**
 * Log out: best-effort server-side token revocation, then always wipe
 * local credentials and reset state to disconnected.
 */
export const logout = async (): Promise<void> => {
  try {
    const credentials = await loadCredentials();
    const refreshToken = credentials?.refreshToken;
    if (refreshToken) {
      await brainApi.logout(refreshToken);
    }
  } catch {
    // Ignore logout errors
  } finally {
    await clearCredentials();
    updateState({
      status: "disconnected",
      user: null,
      knowledgeCount: 0,
      memoryCount: 0,
    });
  }
};
// ============================================================================
// Connection Management
// ============================================================================
/** Resolve the auth token to use: API key first, falling back to JWT. */
export const getAuthToken = async (): Promise<string | undefined> => {
  const apiKey = await getApiKey();
  return apiKey ? apiKey : getJwtToken();
};
/**
 * Check if the Brain service is available and connect.
 *
 * Sequence: health check -> resolve auth token (API key or JWT) ->
 * validate the token by fetching knowledge stats -> opportunistically
 * fetch memory stats. Returns true only when fully connected; always
 * returns false when the BRAIN_DISABLED feature flag is set.
 */
export const connect = async (): Promise<boolean> => {
  // Skip connection when Brain is disabled
  if (BRAIN_DISABLED) {
    return false;
  }
  try {
    updateState({ status: "connecting" });
    // First check if service is healthy
    await brainApi.checkHealth();
    // Then check if we have valid credentials (API key or JWT token)
    const authToken = await getAuthToken();
    if (!authToken) {
      // No credentials is a clean "disconnected", not an error.
      updateState({ status: "disconnected", lastError: null });
      return false;
    }
    // Try to get stats to verify credentials are valid
    const projectId = brainState.projectId ?? BRAIN_DEFAULTS.PROJECT_ID;
    const statsResponse = await brainApi.getKnowledgeStats(projectId, authToken);
    if (statsResponse.success && statsResponse.data) {
      updateState({
        status: "connected",
        knowledgeCount: statsResponse.data.total_concepts,
        lastError: null,
      });
      // Also try to get memory stats
      try {
        const memoryStats = await brainApi.getMemoryStats(authToken);
        updateState({ memoryCount: memoryStats.totalNodes });
      } catch {
        // Memory stats are optional
      }
      return true;
    }
    updateState({ status: "error", lastError: BRAIN_ERRORS.INVALID_API_KEY });
    return false;
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : BRAIN_ERRORS.CONNECTION_FAILED;
    updateState({ status: "error", lastError: errorMessage });
    return false;
  }
};
/**
 * Disconnect from the Brain service. Local state reset only; no network
 * call is made and credentials remain on disk.
 */
export const disconnect = (): void => {
  updateState({
    status: "disconnected",
    knowledgeCount: 0,
    memoryCount: 0,
    lastError: null,
  });
};
/**
 * Check if connected to Brain. Always false when the BRAIN_DISABLED
 * feature flag is set, regardless of the underlying state.
 */
export const isConnected = (): boolean => {
  if (BRAIN_DISABLED) return false;
  return brainState.status === "connected";
};
// ============================================================================
// Knowledge Operations
// ============================================================================
/**
* Recall relevant knowledge for a query
*/
export const recall = async (
query: string,
limit = 5,
): Promise<BrainRecallResponse | null> => {
if (!isConnected()) {
return null;
}
try {
const apiKey = await getApiKey();
if (!apiKey) {
return null;
}
const response = await brainApi.recallKnowledge(
{
query,
project_id: brainState.projectId ?? BRAIN_DEFAULTS.PROJECT_ID,
limit,
},
apiKey,
);
return response;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : BRAIN_ERRORS.RECALL_FAILED;
updateState({ lastError: errorMessage });
return null;
}
};
/**
* Get context string for prompt injection
*/
export const getContext = async (
query: string,
maxConcepts = 3,
): Promise<string | null> => {
if (!isConnected()) {
return null;
}
try {
const apiKey = await getApiKey();
if (!apiKey) {
return null;
}
const response = await brainApi.buildContext(
{
query,
project_id: brainState.projectId ?? BRAIN_DEFAULTS.PROJECT_ID,
max_concepts: maxConcepts,
},
apiKey,
);
if (response.success && response.data.has_knowledge) {
return response.data.context;
}
return null;
} catch {
return null;
}
};
/**
* Learn a concept
*/
export const learn = async (
name: string,
whatItDoes: string,
options?: {
howItWorks?: string;
patterns?: string[];
files?: string[];
keyFunctions?: string[];
aliases?: string[];
},
): Promise<BrainConcept | null> => {
if (!isConnected()) {
return null;
}
try {
const apiKey = await getApiKey();
if (!apiKey) {
return null;
}
const response = await brainApi.learnConcept(
{
project_id: brainState.projectId ?? BRAIN_DEFAULTS.PROJECT_ID,
name,
what_it_does: whatItDoes,
how_it_works: options?.howItWorks,
patterns: options?.patterns,
files: options?.files,
key_functions: options?.keyFunctions,
aliases: options?.aliases,
},
apiKey,
);
if (response.success && response.data) {
// Update knowledge count
updateState({ knowledgeCount: brainState.knowledgeCount + 1 });
return response.data;
}
return null;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : BRAIN_ERRORS.LEARN_FAILED;
updateState({ lastError: errorMessage });
return null;
}
};
/**
* Extract and learn concepts from content
*/
export const extractAndLearn = async (
content: string,
source = "conversation",
): Promise<BrainExtractResponse | null> => {
if (!isConnected()) {
return null;
}
try {
const apiKey = await getApiKey();
if (!apiKey) {
return null;
}
const response = await brainApi.extractConcepts(
{
content,
project_id: brainState.projectId ?? BRAIN_DEFAULTS.PROJECT_ID,
source,
},
apiKey,
);
if (response.success) {
// Update knowledge count
const newCount =
brainState.knowledgeCount + response.data.stored + response.data.updated;
updateState({ knowledgeCount: newCount });
return response;
}
return null;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : BRAIN_ERRORS.EXTRACT_FAILED;
updateState({ lastError: errorMessage });
return null;
}
};
// ============================================================================
// Memory Operations
// ============================================================================
/**
* Search memories
*/
export const searchMemories = async (
query: string,
limit = 10,
): Promise<{ memories: Array<{ content: string; similarity: number }> } | null> => {
if (!isConnected()) {
return null;
}
try {
const apiKey = await getApiKey();
if (!apiKey) {
return null;
}
const response = await brainApi.searchMemories(
{
query,
limit,
project_id: brainState.projectId ?? BRAIN_DEFAULTS.PROJECT_ID,
},
apiKey,
);
return {
memories: response.memories.map((m) => ({
content: m.content,
similarity: m.similarity ?? 0,
})),
};
} catch {
return null;
}
};
/**
* Store a memory
*/
export const storeMemory = async (
content: string,
type: "fact" | "pattern" | "correction" | "preference" | "context" = "context",
): Promise<boolean> => {
if (!isConnected()) {
return false;
}
try {
const apiKey = await getApiKey();
if (!apiKey) {
return false;
}
const response = await brainApi.storeMemory(
{
content,
type,
project_id: brainState.projectId ?? BRAIN_DEFAULTS.PROJECT_ID,
},
apiKey,
);
if (response.success) {
updateState({ memoryCount: brainState.memoryCount + 1 });
return true;
}
return false;
} catch {
return false;
}
};
// ============================================================================
// State Accessors
// ============================================================================
/**
 * Get the current brain state (shallow defensive copy).
 */
export const getState = (): BrainState => {
  return { ...brainState };
};
/**
 * Update brain state by merging partial updates over the current state.
 */
const updateState = (updates: Partial<BrainState>): void => {
  brainState = { ...brainState, ...updates };
};
/**
 * Set the project ID used for subsequent knowledge/memory operations.
 */
export const setProjectId = (projectId: number): void => {
  updateState({ projectId });
};
/**
 * Get the current connection status.
 */
export const getStatus = (): BrainConnectionStatus => {
  return brainState.status;
};
/**
 * Check if authenticated (has API key or JWT token). Does not verify
 * the token with the server; see connect() for validation.
 */
export const isAuthenticated = async (): Promise<boolean> => {
  const apiKey = await getApiKey();
  const jwtToken = await getJwtToken();
  return apiKey !== undefined || jwtToken !== undefined;
};
// ============================================================================
// Initialization
// ============================================================================
/**
* Initialize brain service (auto-connect if credentials available)
*/
export const initialize = async (): Promise<boolean> => {
const hasAuth = await isAuthenticated();
if (hasAuth) {
return connect();
}
return false;
};

View File

@@ -0,0 +1,523 @@
/**
* Cloud Sync Service
*
* Handles push/pull synchronization with the cloud brain service.
*/
import {
CLOUD_BRAIN_DEFAULTS,
CLOUD_ENDPOINTS,
CLOUD_ERRORS,
CLOUD_MESSAGES,
CLOUD_HTTP_CONFIG,
SYNC_CONFIG,
} from "@constants/brain-cloud";
import {
enqueue,
enqueueBatch,
dequeue,
markProcessed,
markFailed,
hasQueuedItems,
getQueueSize,
clearQueue,
} from "@services/brain/offline-queue";
import {
createConflict,
resolveAllConflicts,
getPendingConflicts,
hasUnresolvedConflicts,
clearResolvedConflicts,
} from "@services/brain/conflict-resolver";
import type {
BrainSyncState,
CloudBrainConfig,
SyncItem,
SyncResult,
SyncOptions,
PushRequest,
PushResponse,
PullRequest,
PullResponse,
} from "@/types/brain-cloud";
// Mutable sync state; exposed to callers as a copy via getSyncState().
let syncState: BrainSyncState = {
  status: "synced",
  lastSyncAt: null,
  lastPushAt: null,
  lastPullAt: null,
  pendingChanges: 0,
  conflictCount: 0,
  syncErrors: [],
};
// Cloud configuration, seeded from defaults and merged via configure().
let cloudConfig: CloudBrainConfig = { ...CLOUD_BRAIN_DEFAULTS };
// Sync lock to prevent concurrent syncs
let syncInProgress = false;
// Local version tracking counter for change detection.
let localVersion = 0;
/**
 * Merge partial overrides into the active cloud-sync configuration.
 */
export const configure = (config: Partial<CloudBrainConfig>): void => {
  cloudConfig = Object.assign({}, cloudConfig, config);
};
/**
 * Get current sync state (defensive shallow copy).
 */
export const getSyncState = (): BrainSyncState => ({ ...syncState });
/**
 * Get cloud configuration (defensive shallow copy).
 */
export const getConfig = (): CloudBrainConfig => ({ ...cloudConfig });
/**
 * Check if cloud sync is enabled.
 */
export const isEnabled = (): boolean => cloudConfig.enabled;
/**
 * Check if device is online.
 * NOTE(review): placeholder — always reports online, so sync() never takes
 * its offline branch until this is actually implemented.
 */
const isOnline = (): boolean => {
  // In Node.js/Bun, we'll assume online unless proven otherwise
  return true;
};
/**
 * Perform a full sync (push then pull, per `options.direction`).
 *
 * Throws when cloud sync is not enabled, a sync is already running, or the
 * device is offline. After that point it always resolves with a SyncResult:
 * unexpected failures are captured in `result.errors` rather than rethrown.
 */
export const sync = async (
  authToken: string,
  projectId: number,
  options: SyncOptions = {},
): Promise<SyncResult> => {
  if (!cloudConfig.enabled) {
    throw new Error(CLOUD_ERRORS.NOT_CONFIGURED);
  }
  if (syncInProgress) {
    throw new Error(CLOUD_ERRORS.SYNC_IN_PROGRESS);
  }
  if (!isOnline()) {
    syncState.status = "offline";
    throw new Error(CLOUD_ERRORS.OFFLINE);
  }
  // Take the sync lock; released in the finally block below.
  syncInProgress = true;
  syncState.status = "syncing";
  syncState.syncErrors = [];
  const startTime = Date.now();
  const result: SyncResult = {
    success: true,
    direction: options.direction ?? "both",
    itemsSynced: 0,
    itemsFailed: 0,
    conflicts: [],
    errors: [],
    duration: 0,
    timestamp: startTime,
  };
  try {
    const direction = options.direction ?? "both";
    // Push local changes
    if (direction === "push" || direction === "both") {
      options.onProgress?.({
        phase: "pushing",
        current: 0,
        total: await getQueueSize(),
        message: CLOUD_MESSAGES.STARTING_SYNC,
      });
      const pushResult = await pushChanges(authToken, projectId, options);
      result.itemsSynced += pushResult.itemsSynced;
      result.itemsFailed += pushResult.itemsFailed;
      result.conflicts.push(...pushResult.conflicts);
      result.errors.push(...pushResult.errors);
      if (pushResult.errors.length > 0) {
        result.success = false;
      }
    }
    // Pull remote changes
    if (direction === "pull" || direction === "both") {
      options.onProgress?.({
        phase: "pulling",
        current: 0,
        total: 0,
        message: CLOUD_MESSAGES.PULLING(0),
      });
      const pullResult = await pullChanges(authToken, projectId, options);
      result.itemsSynced += pullResult.itemsSynced;
      result.itemsFailed += pullResult.itemsFailed;
      result.conflicts.push(...pullResult.conflicts);
      result.errors.push(...pullResult.errors);
      if (pullResult.errors.length > 0) {
        result.success = false;
      }
    }
    // Handle conflicts if any: auto-resolve unless strategy is "manual".
    if (result.conflicts.length > 0) {
      options.onProgress?.({
        phase: "resolving",
        current: 0,
        total: result.conflicts.length,
        message: CLOUD_MESSAGES.RESOLVING_CONFLICTS(result.conflicts.length),
      });
      const strategy = options.conflictStrategy ?? cloudConfig.conflictStrategy;
      if (strategy !== "manual") {
        resolveAllConflicts(strategy);
        result.conflicts = getPendingConflicts();
      }
      if (hasUnresolvedConflicts()) {
        syncState.status = "conflict";
        syncState.conflictCount = result.conflicts.length;
      }
    }
    // Update state: synced / conflict / error, plus bookkeeping timestamps.
    result.duration = Date.now() - startTime;
    if (result.success && result.conflicts.length === 0) {
      syncState.status = "synced";
      syncState.lastSyncAt = Date.now();
    } else if (result.conflicts.length > 0) {
      syncState.status = "conflict";
    } else {
      syncState.status = "error";
    }
    syncState.pendingChanges = await getQueueSize();
    syncState.syncErrors = result.errors;
    options.onProgress?.({
      phase: "completing",
      current: result.itemsSynced,
      total: result.itemsSynced,
      message: CLOUD_MESSAGES.SYNC_COMPLETE,
    });
    return result;
  } catch (error) {
    // Unexpected failure: surface via the result instead of rethrowing.
    const message = error instanceof Error ? error.message : String(error);
    syncState.status = "error";
    syncState.syncErrors.push(message);
    result.success = false;
    result.errors.push(message);
    result.duration = Date.now() - startTime;
    return result;
  } finally {
    syncInProgress = false;
    clearResolvedConflicts();
  }
};
/**
 * Push queued local changes to the cloud.
 *
 * Dequeues up to MAX_BATCH_SIZE queued items; accepted items are removed
 * from the queue, failures have their retry count bumped for a later retry.
 */
const pushChanges = async (
  authToken: string,
  projectId: number,
  options: SyncOptions,
): Promise<Omit<SyncResult, "direction" | "timestamp">> => {
  const result = {
    success: true,
    itemsSynced: 0,
    itemsFailed: 0,
    conflicts: [] as SyncResult["conflicts"],
    errors: [] as string[],
    duration: 0,
  };
  // Get queued items
  const queuedItems = await dequeue(SYNC_CONFIG.MAX_BATCH_SIZE);
  if (queuedItems.length === 0) {
    return result;
  }
  options.onProgress?.({
    phase: "pushing",
    current: 0,
    total: queuedItems.length,
    message: CLOUD_MESSAGES.PUSHING(queuedItems.length),
  });
  const items = queuedItems.map((q) => q.item);
  try {
    const response = await pushToCloud(authToken, projectId, items);
    if (response.success) {
      result.itemsSynced = response.accepted;
      result.itemsFailed = response.rejected;
      // Mark successful items as processed.
      // NOTE(review): this assumes the server accepts items strictly in
      // submission order (the first `accepted` entries of the batch). If the
      // API can reject arbitrary items, the wrong queue entries get removed —
      // confirm the push endpoint's contract.
      const successIds = queuedItems
        .slice(0, response.accepted)
        .map((q) => q.id);
      await markProcessed(successIds);
      // Handle conflicts
      for (const conflict of response.conflicts) {
        result.conflicts.push(conflict);
      }
      syncState.lastPushAt = Date.now();
    } else {
      result.success = false;
      result.errors.push(...(response.errors ?? []));
      // Mark all as failed
      await markFailed(
        queuedItems.map((q) => q.id),
        response.errors?.[0],
      );
    }
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    result.success = false;
    result.errors.push(CLOUD_ERRORS.PUSH_FAILED(message));
    // Queue for retry
    await markFailed(
      queuedItems.map((q) => q.id),
      message,
    );
  }
  return result;
};
/**
 * Pull remote changes from the cloud and apply them locally.
 *
 * Items that clash with pending local changes become conflict entries; all
 * others are applied and counted in `itemsSynced`. On success the server
 * version is recorded so the next pull is incremental.
 */
const pullChanges = async (
  authToken: string,
  projectId: number,
  options: SyncOptions,
): Promise<Omit<SyncResult, "direction" | "timestamp">> => {
  const result = {
    success: true,
    itemsSynced: 0,
    itemsFailed: 0,
    conflicts: [] as SyncResult["conflicts"],
    errors: [] as string[],
    duration: 0,
  };
  try {
    const response = await pullFromCloud(
      authToken,
      projectId,
      localVersion,
      syncState.lastPullAt ?? 0,
    );
    if (response.success) {
      options.onProgress?.({
        phase: "pulling",
        current: response.items.length,
        total: response.items.length,
        message: CLOUD_MESSAGES.PULLING(response.items.length),
      });
      // Process pulled items
      for (const item of response.items) {
        // Check for conflicts with local changes
        const hasConflict = await checkLocalConflict(item);
        if (hasConflict) {
          // Create conflict entry
          const localItem = await getLocalItem(item.id, item.type);
          if (localItem) {
            const conflict = createConflict(localItem, item);
            result.conflicts.push(conflict);
          }
        } else {
          // Apply remote change locally
          await applyRemoteChange(item);
          result.itemsSynced++;
        }
      }
      // Update local version so the next pull only fetches newer changes.
      localVersion = response.serverVersion;
      syncState.lastPullAt = Date.now();
    } else {
      result.success = false;
      result.errors.push(...(response.errors ?? []));
    }
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    result.success = false;
    result.errors.push(CLOUD_ERRORS.PULL_FAILED(message));
  }
  return result;
};
/**
 * POST a batch of sync items to the cloud push endpoint.
 * Throws on non-2xx responses; otherwise resolves with the parsed PushResponse.
 */
const pushToCloud = async (
  authToken: string,
  projectId: number,
  items: SyncItem[],
): Promise<PushResponse> => {
  const payload: PushRequest = {
    items,
    projectId,
    clientVersion: "1.0.0",
  };
  const res = await fetch(`${cloudConfig.endpoint}${CLOUD_ENDPOINTS.PUSH}`, {
    method: "POST",
    headers: {
      ...CLOUD_HTTP_CONFIG.HEADERS,
      Authorization: `Bearer ${authToken}`,
    },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(CLOUD_HTTP_CONFIG.TIMEOUT_MS),
  });
  if (!res.ok) {
    throw new Error(`HTTP ${res.status}: ${res.statusText}`);
  }
  return res.json() as Promise<PushResponse>;
};
/**
 * POST an incremental pull request to the cloud pull endpoint.
 * Throws on non-2xx responses; otherwise resolves with the parsed PullResponse.
 */
const pullFromCloud = async (
  authToken: string,
  projectId: number,
  sinceVersion: number,
  sinceTimestamp: number,
): Promise<PullResponse> => {
  const payload: PullRequest = {
    projectId,
    sinceVersion,
    sinceTimestamp,
    limit: SYNC_CONFIG.MAX_BATCH_SIZE,
  };
  const res = await fetch(`${cloudConfig.endpoint}${CLOUD_ENDPOINTS.PULL}`, {
    method: "POST",
    headers: {
      ...CLOUD_HTTP_CONFIG.HEADERS,
      Authorization: `Bearer ${authToken}`,
    },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(CLOUD_HTTP_CONFIG.TIMEOUT_MS),
  });
  if (!res.ok) {
    throw new Error(`HTTP ${res.status}: ${res.statusText}`);
  }
  return res.json() as Promise<PullResponse>;
};
/**
 * Check if a pulled item conflicts with local changes.
 *
 * NOTE(review): despite the name, this returns true when ANY items are queued
 * locally — not when a change for THIS specific item is pending — so one
 * queued item flags every pulled item as conflicting. Presumably a per-item
 * lookup was intended; confirm before relying on conflict counts.
 */
const checkLocalConflict = async (
  _item: SyncItem,
): Promise<boolean> => {
  // Check if we have pending changes for this item
  const queued = await hasQueuedItems();
  return queued;
};
/**
 * Get local item by ID and type.
 * Placeholder: always returns null until wired to the brain service.
 */
const getLocalItem = async (
  _id: string,
  _type: "concept" | "memory" | "relation",
): Promise<SyncItem | null> => {
  // This would retrieve the local item from the brain service
  // Placeholder implementation
  return null;
};
/**
 * Apply a remote change locally.
 * Placeholder: a no-op until wired to the local brain storage.
 */
const applyRemoteChange = async (_item: SyncItem): Promise<void> => {
  // This would apply the change to the local brain storage
  // Placeholder implementation
};
/**
 * Enqueue a single change for later synchronization and mark the sync state
 * as pending.
 */
export const queueChange = async (item: SyncItem): Promise<void> => {
  await enqueue(item);
  syncState.pendingChanges = await getQueueSize();
  syncState.status = "pending";
};
/**
 * Enqueue several changes at once; returns how many were accepted by the
 * offline queue (it may stop early when full).
 */
export const queueChanges = async (items: SyncItem[]): Promise<number> => {
  const accepted = await enqueueBatch(items);
  syncState.pendingChanges = await getQueueSize();
  syncState.status = "pending";
  return accepted;
};
/**
 * Trigger an immediate bidirectional sync.
 */
export const syncNow = (
  authToken: string,
  projectId: number,
): Promise<SyncResult> => sync(authToken, projectId, { force: true });
/**
 * Clear the offline queue and reset all sync bookkeeping to a pristine,
 * fully-synced state.
 */
export const resetSyncState = async (): Promise<void> => {
  await clearQueue();
  localVersion = 0;
  syncState = {
    status: "synced",
    lastSyncAt: null,
    lastPushAt: null,
    lastPullAt: null,
    pendingChanges: 0,
    conflictCount: 0,
    syncErrors: [],
  };
};

View File

@@ -0,0 +1,249 @@
/**
* Conflict Resolver
*
* Handles sync conflicts between local and remote brain data.
*/
import {
CONFLICT_LABELS,
} from "@constants/brain-cloud";
import type {
SyncConflict,
ConflictStrategy,
SyncItem,
} from "@/types/brain-cloud";
// In-memory conflict storage, keyed by generated conflict ID.
const pendingConflicts = new Map<string, SyncConflict>();
/**
 * Create a conflict record from a local and a remote copy of the same item,
 * register it as pending, and return it.
 */
export const createConflict = (
  localItem: SyncItem,
  remoteItem: SyncItem,
): SyncConflict => {
  const conflict: SyncConflict = {
    id: generateConflictId(),
    itemId: localItem.id,
    itemType: localItem.type,
    localData: localItem.data,
    remoteData: remoteItem.data,
    localVersion: localItem.localVersion,
    // Remote copies may not carry a version; default to 0.
    remoteVersion: remoteItem.remoteVersion ?? 0,
    localTimestamp: localItem.timestamp,
    remoteTimestamp: remoteItem.timestamp,
    resolved: false,
  };
  pendingConflicts.set(conflict.id, conflict);
  return conflict;
};
/**
* Resolve a conflict using the specified strategy
*/
export const resolveConflict = (
conflictId: string,
strategy: ConflictStrategy,
): SyncConflict | null => {
const conflict = pendingConflicts.get(conflictId);
if (!conflict) return null;
const resolver = resolvers[strategy];
const resolvedData = resolver(conflict);
conflict.resolved = true;
conflict.resolution = strategy;
conflict.resolvedData = resolvedData;
return conflict;
};
/**
* Resolve all pending conflicts with a single strategy
*/
export const resolveAllConflicts = (
strategy: ConflictStrategy,
): SyncConflict[] => {
const resolved: SyncConflict[] = [];
for (const [id, conflict] of pendingConflicts) {
if (!conflict.resolved) {
const result = resolveConflict(id, strategy);
if (result) {
resolved.push(result);
}
}
}
return resolved;
};
/**
 * Conflict resolution strategies, keyed by ConflictStrategy.
 * Each returns the data that should win for the given conflict.
 */
const resolvers: Record<ConflictStrategy, (conflict: SyncConflict) => unknown> = {
  "local-wins": (conflict) => conflict.localData,
  "remote-wins": (conflict) => conflict.remoteData,
  manual: (_conflict) => {
    // Manual resolution returns null - requires user input
    return null;
  },
  merge: (conflict) => {
    // Attempt to merge the data
    return mergeData(conflict.localData, conflict.remoteData);
  },
};
/**
 * Best-effort merge of local and remote values.
 * Objects: start from remote, overlay the local fields (except timestamp
 * fields), then keep the most recent updatedAt. Non-objects: local wins.
 */
const mergeData = (local: unknown, remote: unknown): unknown => {
  if (!isObject(local) || !isObject(remote)) {
    // For non-objects, prefer local (or most recent)
    return local;
  }
  const merged: Record<string, unknown> = { ...remote };
  for (const [key, value] of Object.entries(local)) {
    // Local wins for non-timestamp fields that differ
    if (key === "updatedAt" || key === "timestamp") {
      continue;
    }
    merged[key] = value;
  }
  // Use most recent timestamp
  const localTime = (local.updatedAt ?? local.timestamp ?? 0) as number;
  const remoteTime = (remote.updatedAt ?? remote.timestamp ?? 0) as number;
  merged.updatedAt = Math.max(localTime, remoteTime);
  return merged;
};
/**
 * Runtime check for a plain (non-null, non-array) object; narrows to a
 * string-keyed record.
 */
const isObject = (value: unknown): value is Record<string, unknown> => {
  return typeof value === "object" && value !== null && !Array.isArray(value);
};
/**
 * Get conflicts that still await resolution.
 */
export const getPendingConflicts = (): SyncConflict[] => {
  return Array.from(pendingConflicts.values()).filter((c) => !c.resolved);
};
/**
 * Get every tracked conflict, resolved or not.
 */
export const getAllConflicts = (): SyncConflict[] => {
  return Array.from(pendingConflicts.values());
};
/**
 * Look up a single conflict by its generated ID.
 */
export const getConflict = (id: string): SyncConflict | undefined => {
  return pendingConflicts.get(id);
};
/**
 * Drop resolved conflicts from the store; returns how many were removed.
 * (Deleting during Map iteration is safe in JS.)
 */
export const clearResolvedConflicts = (): number => {
  let removed = 0;
  for (const [id, entry] of pendingConflicts) {
    if (!entry.resolved) {
      continue;
    }
    pendingConflicts.delete(id);
    removed += 1;
  }
  return removed;
};
/**
 * Forget every conflict, resolved or not.
 */
export const clearAllConflicts = (): void => {
  pendingConflicts.clear();
};
/**
 * Number of conflicts still awaiting resolution.
 */
export const getConflictCount = (): number => getPendingConflicts().length;
/**
 * Whether any conflict still awaits resolution.
 */
export const hasUnresolvedConflicts = (): boolean =>
  getPendingConflicts().length > 0;
/**
 * Suggest a resolution strategy from the conflict's timestamps:
 * the newer side wins; equal timestamps suggest a merge.
 */
export const suggestResolution = (conflict: SyncConflict): ConflictStrategy => {
  const { localTimestamp, remoteTimestamp } = conflict;
  if (remoteTimestamp === localTimestamp) {
    return "merge";
  }
  return remoteTimestamp > localTimestamp ? "remote-wins" : "local-wins";
};
/**
 * Render a conflict as a human-readable markdown snippet with both data
 * versions as JSON blocks; appends the resolution label when resolved.
 */
export const formatConflict = (conflict: SyncConflict): string => {
  const out: string[] = [
    `**Conflict: ${conflict.itemId}**`,
    `Type: ${conflict.itemType}`,
    `Local version: ${conflict.localVersion}`,
    `Remote version: ${conflict.remoteVersion}`,
    "",
    "Local data:",
    "```json",
    JSON.stringify(conflict.localData, null, 2),
    "```",
    "",
    "Remote data:",
    "```json",
    JSON.stringify(conflict.remoteData, null, 2),
    "```",
  ];
  if (conflict.resolved) {
    out.push("", `Resolution: ${CONFLICT_LABELS[conflict.resolution!]}`);
  }
  return out.join("\n");
};
/**
 * Generate a unique conflict ID: timestamp plus 9 random base-36 characters.
 */
const generateConflictId = (): string => {
  // String#substr is deprecated; slice(2, 11) yields the same 9-char suffix.
  return `conflict_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
};

View File

@@ -0,0 +1,354 @@
/**
* Brain MCP Server service
* Exposes Brain as an MCP server for external tools
*/
import { createServer, type Server, type IncomingMessage, type ServerResponse } from "node:http";
import { randomUUID } from "node:crypto";
import type {
BrainMcpServerConfig,
BrainMcpRequest,
BrainMcpResponse,
BrainMcpServerStatus,
BrainMcpToolName,
McpContent,
McpError,
} from "@src/types/brain-mcp";
import {
DEFAULT_BRAIN_MCP_SERVER_CONFIG,
BRAIN_MCP_TOOLS,
MCP_ERROR_CODES,
} from "@src/types/brain-mcp";
import {
BRAIN_MCP_SERVER,
BRAIN_MCP_MESSAGES,
BRAIN_MCP_ERRORS,
BRAIN_MCP_AUTH,
} from "@src/constants/brain-mcp";
// Minimal surface of the brain service that the MCP server depends on
// (structural type; presumably kept local to avoid a hard import cycle —
// see the dynamic import in the brain_projects handler below).
type BrainService = {
  recall: (query: string, limit?: number) => Promise<unknown>;
  learn: (name: string, whatItDoes: string, options?: unknown) => Promise<unknown>;
  searchMemories: (query: string, limit?: number, type?: string) => Promise<unknown>;
  relate: (source: string, target: string, type: string, weight?: number) => Promise<unknown>;
  getContext: (query: string, maxConcepts?: number) => Promise<string>;
  getStats: () => Promise<unknown>;
  isConnected: () => boolean;
};
// Mutable singleton state for the HTTP MCP server.
interface McpServerState {
  server: Server | null;               // live node:http server; null when stopped
  config: BrainMcpServerConfig;
  brainService: BrainService | null;   // injected on start(), cleared on stop()
  connectedClients: number;
  startTime: number | null;            // epoch ms of the last successful start
  requestsServed: number;
  lastRequestAt: number | null;
  rateLimitMap: Map<string, { count: number; resetAt: number }>; // per-client-IP window counters
  apiKeys: Set<string>;                // accepted keys (empty set accepts any key — see validateApiKey)
}
// Module-level singleton holding all MCP server runtime state.
const state: McpServerState = {
  server: null,
  config: DEFAULT_BRAIN_MCP_SERVER_CONFIG,
  brainService: null,
  connectedClients: 0,
  startTime: null,
  requestsServed: 0,
  lastRequestAt: null,
  rateLimitMap: new Map(),
  apiKeys: new Set(),
};
// Build an MCP error payload.
const createMcpError = (code: number, message: string, data?: unknown): McpError => ({
  code,
  message,
  data,
});
// Build an MCP response envelope: an error response when `error` is given,
// otherwise a result wrapping the provided content (empty array by default).
const createMcpResponse = (
  id: string | number,
  content?: ReadonlyArray<McpContent>,
  error?: McpError
): BrainMcpResponse => {
  if (error) {
    return { id, error };
  }
  return { id, result: { content: content ?? [] } };
};
/**
 * Fixed-window rate limiter keyed by client IP.
 * The first request in a window (or after expiry) resets the counter;
 * returns true when the request is allowed.
 */
const checkRateLimit = (clientIp: string): boolean => {
  if (!state.config.rateLimit.enabled) return true;
  const now = Date.now();
  const clientLimit = state.rateLimitMap.get(clientIp);
  if (!clientLimit || now > clientLimit.resetAt) {
    state.rateLimitMap.set(clientIp, {
      count: 1,
      resetAt: now + state.config.rateLimit.windowMs,
    });
    return true;
  }
  if (clientLimit.count >= state.config.rateLimit.maxRequests) {
    return false;
  }
  state.rateLimitMap.set(clientIp, {
    ...clientLimit,
    count: clientLimit.count + 1,
  });
  return true;
};
/**
 * Validate the configured API key header when auth is enabled.
 * NOTE(review): when auth is on but no keys are registered, ANY non-empty
 * key is accepted (deliberate per the comment below) — confirm this
 * bootstrap behavior before exposing the server beyond localhost.
 */
const validateApiKey = (req: IncomingMessage): boolean => {
  if (!state.config.enableAuth) return true;
  const apiKey = req.headers[state.config.apiKeyHeader.toLowerCase()] as string | undefined;
  if (!apiKey) return false;
  // If no API keys configured, accept any key for now
  if (state.apiKeys.size === 0) return true;
  return state.apiKeys.has(apiKey);
};
/**
 * Dispatch a tool invocation to the injected brain service.
 * Throws an McpError-shaped object when the service is missing/disconnected
 * or the tool name is unknown; otherwise returns the result as text content.
 */
const handleToolCall = async (
  toolName: BrainMcpToolName,
  args: Record<string, unknown>
): Promise<McpContent[]> => {
  // Capture once so handlers below need no non-null assertions.
  const brain = state.brainService;
  if (!brain) {
    throw createMcpError(MCP_ERROR_CODES.BRAIN_UNAVAILABLE, BRAIN_MCP_MESSAGES.SERVER_NOT_RUNNING);
  }
  if (!brain.isConnected()) {
    throw createMcpError(MCP_ERROR_CODES.BRAIN_UNAVAILABLE, "Brain service not connected");
  }
  // The tool must be advertised in the public tool list…
  if (!BRAIN_MCP_TOOLS.some((t) => t.name === toolName)) {
    throw createMcpError(MCP_ERROR_CODES.TOOL_NOT_FOUND, `Tool not found: ${toolName}`);
  }
  // …and have a concrete handler bound to the brain service.
  const toolHandlers: Record<BrainMcpToolName, () => Promise<unknown>> = {
    brain_recall: () => brain.recall(args.query as string, args.limit as number | undefined),
    brain_learn: () =>
      brain.learn(
        args.name as string,
        args.whatItDoes as string,
        { keywords: args.keywords, patterns: args.patterns, files: args.files }
      ),
    brain_search: () =>
      brain.searchMemories(
        args.query as string,
        args.limit as number | undefined,
        args.type as string | undefined
      ),
    brain_relate: () =>
      brain.relate(
        args.sourceConcept as string,
        args.targetConcept as string,
        args.relationType as string,
        args.weight as number | undefined
      ),
    brain_context: () =>
      brain.getContext(
        args.query as string,
        args.maxConcepts as number | undefined
      ),
    brain_stats: () => brain.getStats(),
    brain_projects: async () => {
      // Import dynamically to avoid circular dependency
      const { listProjects } = await import("@src/services/brain/project-service");
      return listProjects();
    },
  };
  const handler = toolHandlers[toolName];
  if (!handler) {
    throw createMcpError(MCP_ERROR_CODES.TOOL_NOT_FOUND, `No handler for tool: ${toolName}`);
  }
  const result = await handler();
  return [
    {
      type: "text",
      text: typeof result === "string" ? result : JSON.stringify(result, null, 2),
    },
  ];
};
/**
 * node:http request handler implementing a minimal MCP-over-HTTP endpoint.
 * Flow: CORS headers → method check → rate limit → auth → parse JSON body →
 * dispatch ("tools/call" or "tools/list").
 */
const handleRequest = async (
  req: IncomingMessage,
  res: ServerResponse
): Promise<void> => {
  // Set CORS headers
  res.setHeader("Access-Control-Allow-Origin", state.config.allowedOrigins.join(","));
  res.setHeader("Access-Control-Allow-Methods", "POST, OPTIONS");
  res.setHeader("Access-Control-Allow-Headers", `Content-Type, ${state.config.apiKeyHeader}`);
  // Handle preflight
  if (req.method === "OPTIONS") {
    res.writeHead(204);
    res.end();
    return;
  }
  if (req.method !== "POST") {
    res.writeHead(405);
    res.end(JSON.stringify(createMcpResponse("", undefined, BRAIN_MCP_ERRORS.INVALID_REQUEST)));
    return;
  }
  // Get client IP for rate limiting
  const clientIp = req.socket.remoteAddress || "unknown";
  // Check rate limit
  if (!checkRateLimit(clientIp)) {
    res.writeHead(429);
    res.end(JSON.stringify(createMcpResponse("", undefined, BRAIN_MCP_ERRORS.RATE_LIMITED)));
    return;
  }
  // Validate API key
  if (!validateApiKey(req)) {
    res.writeHead(401);
    res.end(JSON.stringify(createMcpResponse("", undefined, BRAIN_MCP_ERRORS.UNAUTHORIZED)));
    return;
  }
  // Parse request body (streamed; dispatch happens once fully received)
  let body = "";
  req.on("data", (chunk) => {
    body += chunk;
  });
  req.on("end", async () => {
    state.requestsServed++;
    state.lastRequestAt = Date.now();
    let mcpRequest: BrainMcpRequest;
    try {
      mcpRequest = JSON.parse(body) as BrainMcpRequest;
    } catch {
      res.writeHead(400);
      res.end(JSON.stringify(createMcpResponse("", undefined, BRAIN_MCP_ERRORS.PARSE_ERROR)));
      return;
    }
    // Handle MCP request
    try {
      if (mcpRequest.method === "tools/call") {
        const { name, arguments: args } = mcpRequest.params;
        const content = await handleToolCall(name, args);
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(JSON.stringify(createMcpResponse(mcpRequest.id, content)));
      } else if (mcpRequest.method === "tools/list") {
        const tools = BRAIN_MCP_TOOLS.map((tool) => ({
          name: tool.name,
          description: tool.description,
          inputSchema: tool.inputSchema,
        }));
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(JSON.stringify({
          id: mcpRequest.id,
          result: { tools },
        }));
      } else {
        res.writeHead(400);
        res.end(JSON.stringify(createMcpResponse(mcpRequest.id, undefined, BRAIN_MCP_ERRORS.METHOD_NOT_FOUND)));
      }
    } catch (error) {
      // handleToolCall throws plain McpError-shaped objects; anything else
      // is wrapped as an internal error.
      const mcpError = error instanceof Object && "code" in error
        ? error as McpError
        : createMcpError(MCP_ERROR_CODES.INTERNAL_ERROR, error instanceof Error ? error.message : "Unknown error");
      res.writeHead(500);
      res.end(JSON.stringify(createMcpResponse(mcpRequest.id, undefined, mcpError)));
    }
  });
};
// Public API
/**
 * Start the MCP HTTP server.
 * @param brainService injected brain facade used by the tool handlers
 * @param config optional overrides merged over the defaults
 * @throws when the server is already running; rejects on bind/listen errors
 */
export const start = async (
  brainService: BrainService,
  config?: Partial<BrainMcpServerConfig>
): Promise<void> => {
  if (state.server) {
    throw new Error(BRAIN_MCP_MESSAGES.SERVER_ALREADY_RUNNING);
  }
  state.config = { ...DEFAULT_BRAIN_MCP_SERVER_CONFIG, ...config };
  state.brainService = brainService;
  return new Promise((resolve, reject) => {
    state.server = createServer(handleRequest);
    state.server.on("error", (error) => {
      // Bind/listen failure: clear the handle so a retry can start cleanly.
      state.server = null;
      reject(error);
    });
    state.server.listen(state.config.port, state.config.host, () => {
      state.startTime = Date.now();
      state.requestsServed = 0;
      resolve();
    });
  });
};
/**
 * Stop the MCP HTTP server and clear its runtime state.
 * A no-op when the server is not running.
 */
export const stop = async (): Promise<void> => {
  const server = state.server;
  if (!server) {
    return;
  }
  await new Promise<void>((resolve) => {
    server.close(() => {
      state.server = null;
      state.startTime = null;
      state.connectedClients = 0;
      state.brainService = null;
      resolve();
    });
  });
};
// Snapshot of the server's runtime status.
export const getStatus = (): BrainMcpServerStatus => ({
  running: state.server !== null,
  port: state.config.port,
  host: state.config.host,
  connectedClients: state.connectedClients,
  uptime: state.startTime ? Date.now() - state.startTime : 0,
  requestsServed: state.requestsServed,
  lastRequestAt: state.lastRequestAt || undefined,
});
// Register an API key accepted by validateApiKey.
export const addApiKey = (key: string): void => {
  state.apiKeys.add(key);
};
// Revoke a previously registered API key.
export const removeApiKey = (key: string): void => {
  state.apiKeys.delete(key);
};
// True while the HTTP server is listening.
export const isRunning = (): boolean => state.server !== null;
// Shallow copy of the active configuration.
export const getConfig = (): BrainMcpServerConfig => ({ ...state.config });
// Merge config overrides into the live configuration.
export const updateConfig = (config: Partial<BrainMcpServerConfig>): void => {
  state.config = { ...state.config, ...config };
};
// Name/description listing of the advertised tools.
export const getAvailableTools = (): ReadonlyArray<{ name: string; description: string }> =>
  BRAIN_MCP_TOOLS.map((t) => ({ name: t.name, description: t.description }));

View File

@@ -0,0 +1,270 @@
/**
* Offline Queue
*
* Manages queued changes when offline for later synchronization.
*/
import fs from "fs/promises";
import { join } from "path";
import { DIRS } from "@constants/paths";
import { SYNC_CONFIG, CLOUD_ERRORS } from "@constants/brain-cloud";
import type {
SyncItem,
OfflineQueueItem,
OfflineQueueState,
SyncOperationType,
} from "@/types/brain-cloud";
// Path of the persisted queue file inside the data directory.
const getQueuePath = (): string => join(DIRS.data, "brain-offline-queue.json");
// In-memory queue state; mirrors the JSON file on disk after loadQueue().
let queueState: OfflineQueueState = {
  items: [],
  totalSize: 0,
  oldestItem: null,
};
// Guards against re-reading the file once it has been loaded.
let loaded = false;
/**
 * Load queue from disk (idempotent; only the first call reads the file).
 * A missing or unparsable file yields a fresh empty queue.
 */
export const loadQueue = async (): Promise<void> => {
  if (loaded) return;
  try {
    const data = await fs.readFile(getQueuePath(), "utf-8");
    const parsed = JSON.parse(data) as OfflineQueueState;
    queueState = parsed;
    loaded = true;
  } catch {
    // File doesn't exist or is invalid, start fresh
    queueState = {
      items: [],
      totalSize: 0,
      oldestItem: null,
    };
    loaded = true;
  }
};
/**
 * Persist the in-memory queue to disk.
 * I/O errors are logged and swallowed so queue operations never throw here.
 */
const saveQueue = async (): Promise<void> => {
  try {
    await fs.mkdir(DIRS.data, { recursive: true });
    await fs.writeFile(getQueuePath(), JSON.stringify(queueState, null, 2));
  } catch (error) {
    console.error("Failed to save offline queue:", error);
  }
};
/**
 * Append one item to the offline queue and persist it.
 * @throws when the queue is already at MAX_QUEUE_SIZE
 * @returns true once the item has been queued and saved
 */
export const enqueue = async (item: SyncItem): Promise<boolean> => {
  await loadQueue();
  // Check queue size limit
  if (queueState.items.length >= SYNC_CONFIG.MAX_QUEUE_SIZE) {
    throw new Error(CLOUD_ERRORS.QUEUE_FULL);
  }
  queueState.items.push({
    id: generateQueueId(),
    item,
    retryCount: 0,
    lastAttempt: 0,
  });
  queueState.totalSize = queueState.items.length;
  // Track the earliest queued timestamp.
  queueState.oldestItem =
    queueState.oldestItem == null
      ? item.timestamp
      : Math.min(queueState.oldestItem, item.timestamp);
  await saveQueue();
  return true;
};
/**
 * Add multiple items to the queue.
 *
 * Unlike enqueue(), this stops silently when the queue fills instead of
 * throwing, and returns the number of items actually added.
 */
export const enqueueBatch = async (items: SyncItem[]): Promise<number> => {
  await loadQueue();
  let added = 0;
  for (const item of items) {
    if (queueState.items.length >= SYNC_CONFIG.MAX_QUEUE_SIZE) {
      break;
    }
    queueState.items.push({
      id: generateQueueId(),
      item,
      retryCount: 0,
      lastAttempt: 0,
    });
    added++;
  }
  queueState.totalSize = queueState.items.length;
  if (added > 0) {
    // Fix: only items that were actually enqueued may move the oldest-item
    // watermark; previously the spread covered ALL inputs, so an item
    // dropped by the size cap could leave a stale timestamp behind.
    queueState.oldestItem = Math.min(
      queueState.oldestItem ?? Date.now(),
      ...items.slice(0, added).map((i) => i.timestamp),
    );
  }
  await saveQueue();
  return added;
};
/**
 * Get up to `limit` queued items eligible for a sync attempt.
 * Items are returned, not removed — callers must follow up with
 * markProcessed() or markFailed().
 *
 * NOTE(review): the retry filter compares retryCount against
 * SYNC_CONFIG.MAX_QUEUE_SIZE, which looks like the wrong constant — a
 * max-retry limit was presumably intended. With a large queue cap this
 * filter effectively never excludes an item; confirm against SYNC_CONFIG.
 */
export const dequeue = async (limit: number = SYNC_CONFIG.MAX_BATCH_SIZE): Promise<OfflineQueueItem[]> => {
  await loadQueue();
  // Get items that haven't exceeded retry limit
  const available = queueState.items.filter(
    (item) => item.retryCount < SYNC_CONFIG.MAX_QUEUE_SIZE,
  );
  return available.slice(0, limit);
};
/**
 * Remove successfully synced items from the queue and refresh the
 * oldest-item watermark before persisting.
 */
export const markProcessed = async (ids: string[]): Promise<void> => {
  await loadQueue();
  const done = new Set(ids);
  queueState.items = queueState.items.filter((entry) => !done.has(entry.id));
  queueState.totalSize = queueState.items.length;
  // Recompute the oldest remaining timestamp (null when the queue is empty).
  queueState.oldestItem =
    queueState.items.length > 0
      ? Math.min(...queueState.items.map((entry) => entry.item.timestamp))
      : null;
  await saveQueue();
};
/**
 * Record a failed sync attempt for the given queue entries: bump each
 * entry's retry count, stamp the attempt time, and store the error text.
 */
export const markFailed = async (
  ids: string[],
  error?: string,
): Promise<void> => {
  await loadQueue();
  const attemptedAt = Date.now();
  for (const id of ids) {
    const entry = queueState.items.find((q) => q.id === id);
    if (!entry) {
      continue;
    }
    entry.retryCount++;
    entry.lastAttempt = attemptedAt;
    entry.error = error;
  }
  await saveQueue();
};
/**
 * Snapshot of the queue state (shallow copy; `items` array is shared).
 */
export const getQueueState = async (): Promise<OfflineQueueState> => {
  await loadQueue();
  return { ...queueState };
};
/**
 * Number of queued items.
 */
export const getQueueSize = async (): Promise<number> => {
  await loadQueue();
  return queueState.items.length;
};
/**
 * Whether any item is waiting to be synced.
 */
export const hasQueuedItems = async (): Promise<boolean> => {
  await loadQueue();
  return queueState.items.length > 0;
};
/**
* Clear the entire queue
*/
export const clearQueue = async (): Promise<void> => {
queueState = {
items: [],
totalSize: 0,
oldestItem: null,
};
await saveQueue();
};
/**
 * Drop queue entries older than STALE_ITEM_AGE_MS; persists only when
 * something was actually removed. Returns the number of removed entries.
 */
export const pruneStaleItems = async (): Promise<number> => {
  await loadQueue();
  const cutoff = Date.now() - SYNC_CONFIG.STALE_ITEM_AGE_MS;
  const fresh = queueState.items.filter((entry) => entry.item.timestamp > cutoff);
  const removed = queueState.items.length - fresh.length;
  queueState.items = fresh;
  queueState.totalSize = fresh.length;
  if (removed > 0) {
    await saveQueue();
  }
  return removed;
};
/**
 * Queue entries whose payload matches the given item type.
 */
export const getItemsByType = async (
  type: "concept" | "memory" | "relation",
): Promise<OfflineQueueItem[]> => {
  await loadQueue();
  return queueState.items.filter((entry) => entry.item.type === type);
};
/**
 * Queue entries whose payload matches the given sync operation.
 */
export const getItemsByOperation = async (
  operation: SyncOperationType,
): Promise<OfflineQueueItem[]> => {
  await loadQueue();
  return queueState.items.filter((entry) => entry.item.operation === operation);
};
/**
 * Generate a unique queue-entry ID: timestamp plus 9 random base-36 chars.
 */
const generateQueueId = (): string => {
  // String#substr is deprecated; slice(2, 11) keeps the same 9-char suffix.
  return `q_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
};

View File

@@ -0,0 +1,384 @@
/**
* Brain project service
* Manages multiple Brain projects/knowledge bases
*/
import { writeFile, readFile, mkdir } from "node:fs/promises";
import { join } from "node:path";
import { existsSync } from "node:fs";
import { homedir } from "node:os";
import type {
BrainProject,
BrainProjectStats,
BrainProjectSettings,
BrainProjectCreateInput,
BrainProjectUpdateInput,
BrainProjectSwitchResult,
BrainProjectListResult,
BrainProjectExport,
BrainProjectImportResult,
ExportedConcept,
ExportedMemory,
ExportedRelationship,
} from "@src/types/brain-project";
import {
DEFAULT_BRAIN_PROJECT_SETTINGS,
BRAIN_PROJECT_EXPORT_VERSION,
} from "@src/types/brain-project";
import {
BRAIN_PROJECT,
BRAIN_PROJECT_STORAGE,
BRAIN_PROJECT_PATHS,
BRAIN_PROJECT_MESSAGES,
BRAIN_PROJECT_API,
} from "@src/constants/brain-project";
// Mutable singleton state for the project service.
interface ProjectServiceState {
  projects: Map<number, BrainProject>; // keyed by numeric project ID
  activeProjectId: number | null;
  configPath: string;                  // JSON file holding projects + active ID
  initialized: boolean;                // set once initialize() has loaded config
}
const state: ProjectServiceState = {
  projects: new Map(),
  activeProjectId: null,
  configPath: join(homedir(), ".local", "share", "codetyper", BRAIN_PROJECT_STORAGE.CONFIG_FILE),
  initialized: false,
};
/**
 * Create the Brain data directories if they do not already exist.
 * `mkdir` with `recursive: true` is idempotent and succeeds when the
 * directory exists, so the previous existsSync pre-check (a TOCTOU race)
 * is unnecessary.
 */
const ensureDirectories = async (): Promise<void> => {
  const base = join(homedir(), ".local", "share", "codetyper", "brain");
  for (const path of [base, join(base, "exports"), join(base, "backups")]) {
    await mkdir(path, { recursive: true });
  }
};
/**
 * Load projects and the active project ID from the config file.
 * A missing file is a no-op; a corrupted file resets to an empty project set.
 */
const loadProjectsFromConfig = async (): Promise<void> => {
  if (!existsSync(state.configPath)) {
    return;
  }
  try {
    const content = await readFile(state.configPath, "utf-8");
    const data = JSON.parse(content) as {
      projects: BrainProject[];
      activeProjectId: number | null;
    };
    state.projects.clear();
    data.projects.forEach((project) => {
      state.projects.set(project.id, project);
    });
    state.activeProjectId = data.activeProjectId;
  } catch {
    // Config file corrupted, start fresh
    state.projects.clear();
    state.activeProjectId = null;
  }
};
/**
 * Persist the current project map and active selection to the config file.
 */
const saveProjectsToConfig = async (): Promise<void> => {
  await ensureDirectories();
  const snapshot = {
    projects: [...state.projects.values()],
    activeProjectId: state.activeProjectId,
    version: "1.0.0",
    updatedAt: Date.now(),
  };
  await writeFile(state.configPath, JSON.stringify(snapshot, null, 2));
};
/**
 * Next free numeric project id: one past the current maximum (1 when empty).
 */
const generateProjectId = (): number => {
  let maxId = 0;
  for (const id of state.projects.keys()) {
    if (id > maxId) maxId = id;
  }
  return maxId + 1;
};
/** Zeroed usage counters for a newly created project. */
const createDefaultStats = (): BrainProjectStats => {
  return {
    conceptCount: 0,
    memoryCount: 0,
    relationshipCount: 0,
    totalTokensUsed: 0,
  };
};
// Public API
/**
 * Load project state from disk once; subsequent calls are no-ops.
 */
export const initialize = async (): Promise<void> => {
  if (state.initialized) {
    return;
  }
  await ensureDirectories();
  await loadProjectsFromConfig();
  state.initialized = true;
};
/**
 * Create and persist a new Brain project.
 * @throws when the name length is out of bounds or already taken
 *   (case-insensitive comparison).
 */
export const createProject = async (input: BrainProjectCreateInput): Promise<BrainProject> => {
  await initialize();
  const { name } = input;
  // Validate name length bounds
  if (name.length < BRAIN_PROJECT.NAME_MIN_LENGTH || name.length > BRAIN_PROJECT.NAME_MAX_LENGTH) {
    throw new Error(BRAIN_PROJECT_MESSAGES.INVALID_NAME);
  }
  // Reject duplicate names regardless of case
  const lowered = name.toLowerCase();
  const duplicate = [...state.projects.values()].some(
    (p) => p.name.toLowerCase() === lowered
  );
  if (duplicate) {
    throw new Error(BRAIN_PROJECT_MESSAGES.ALREADY_EXISTS);
  }
  const timestamp = Date.now();
  const project: BrainProject = {
    id: generateProjectId(),
    name,
    description: input.description || "",
    rootPath: input.rootPath,
    createdAt: timestamp,
    updatedAt: timestamp,
    stats: createDefaultStats(),
    settings: { ...DEFAULT_BRAIN_PROJECT_SETTINGS, ...input.settings },
    isActive: false,
  };
  state.projects.set(project.id, project);
  await saveProjectsToConfig();
  return project;
};
/**
 * Apply a partial update (name, description, settings) to a project and
 * persist the result.
 * @throws when the project id is unknown.
 */
export const updateProject = async (
  projectId: number,
  input: BrainProjectUpdateInput
): Promise<BrainProject> => {
  await initialize();
  const current = state.projects.get(projectId);
  if (!current) {
    throw new Error(BRAIN_PROJECT_MESSAGES.NOT_FOUND);
  }
  const next: BrainProject = {
    ...current,
    name: input.name ?? current.name,
    description: input.description ?? current.description,
    // Settings are shallow-merged; omitting them keeps the current object
    settings: input.settings ? { ...current.settings, ...input.settings } : current.settings,
    updatedAt: Date.now(),
  };
  state.projects.set(projectId, next);
  await saveProjectsToConfig();
  return next;
};
/**
 * Delete a project by id and persist the change.
 * @returns false when the id is unknown, true when the project was deleted.
 */
export const deleteProject = async (projectId: number): Promise<boolean> => {
  await initialize();
  const project = state.projects.get(projectId);
  if (!project) {
    return false;
  }
  // Deleting the active project is allowed; it also clears the active selection
  if (state.activeProjectId === projectId) {
    state.activeProjectId = null;
  }
  state.projects.delete(projectId);
  await saveProjectsToConfig();
  return true;
};
/**
 * Make the given project active, deactivating any previously active one.
 * @throws when the project id is unknown.
 */
export const switchProject = async (projectId: number): Promise<BrainProjectSwitchResult> => {
  await initialize();
  const target = state.projects.get(projectId);
  if (!target) {
    throw new Error(BRAIN_PROJECT_MESSAGES.NOT_FOUND);
  }
  const previousProject = state.activeProjectId
    ? state.projects.get(state.activeProjectId)
    : undefined;
  // Flip isActive flags: previous off, target on
  if (previousProject) {
    state.projects.set(previousProject.id, { ...previousProject, isActive: false });
  }
  state.projects.set(projectId, { ...target, isActive: true });
  state.activeProjectId = projectId;
  await saveProjectsToConfig();
  return {
    success: true,
    previousProject,
    currentProject: state.projects.get(projectId)!,
    message: `${BRAIN_PROJECT_MESSAGES.SWITCHED} "${target.name}"`,
  };
};
/** Look up a project by id (undefined when unknown). */
export const getProject = async (projectId: number): Promise<BrainProject | undefined> => {
  await initialize();
  return state.projects.get(projectId);
};
/** The currently active project, if any. */
export const getActiveProject = async (): Promise<BrainProject | undefined> => {
  await initialize();
  const { activeProjectId } = state;
  return activeProjectId ? state.projects.get(activeProjectId) : undefined;
};
/**
 * All projects sorted by most recently updated first, with active id and count.
 */
export const listProjects = async (): Promise<BrainProjectListResult> => {
  await initialize();
  const sorted = [...state.projects.values()].sort((a, b) => b.updatedAt - a.updatedAt);
  return {
    projects: sorted,
    activeProjectId: state.activeProjectId ?? undefined,
    total: state.projects.size,
  };
};
/** Find the project whose rootPath matches exactly (undefined when none). */
export const findProjectByPath = async (rootPath: string): Promise<BrainProject | undefined> => {
  await initialize();
  for (const project of state.projects.values()) {
    if (project.rootPath === rootPath) {
      return project;
    }
  }
  return undefined;
};
/**
 * Shallow-merge partial stats into a project and persist; silently a no-op
 * for unknown ids.
 */
export const updateProjectStats = async (
  projectId: number,
  stats: Partial<BrainProjectStats>
): Promise<void> => {
  await initialize();
  const existing = state.projects.get(projectId);
  if (!existing) return;
  state.projects.set(projectId, {
    ...existing,
    stats: { ...existing.stats, ...stats },
    updatedAt: Date.now(),
  });
  await saveProjectsToConfig();
};
/**
 * Export a project to a timestamped JSON file under the brain exports dir.
 * Concepts/memories/relationships are currently exported empty (Brain API
 * integration pending); only the project metadata is real.
 * @throws when the project id is unknown.
 */
export const exportProject = async (projectId: number): Promise<BrainProjectExport> => {
  await initialize();
  const project = state.projects.get(projectId);
  if (!project) {
    throw new Error(BRAIN_PROJECT_MESSAGES.NOT_FOUND);
  }
  // In a real implementation, this would fetch data from Brain API
  // For now, return structure with empty data
  const exportData: BrainProjectExport = {
    project,
    concepts: [],
    memories: [],
    relationships: [],
    exportedAt: Date.now(),
    version: BRAIN_PROJECT_EXPORT_VERSION,
  };
  // Save export file
  // NOTE(review): project.name is used verbatim in the file name — a name
  // containing path separators or other unsafe characters would break this;
  // confirm name validation in createProject rules that out.
  const exportPath = join(
    homedir(),
    ".local",
    "share",
    "codetyper",
    "brain",
    "exports",
    `${project.name}-${Date.now()}${BRAIN_PROJECT_STORAGE.EXPORT_EXTENSION}`
  );
  await writeFile(exportPath, JSON.stringify(exportData, null, 2));
  return exportData;
};
/**
 * Import a previously exported project as a new "(imported)" project.
 * Failures are reported in the result object rather than thrown.
 */
export const importProject = async (
  exportData: BrainProjectExport
): Promise<BrainProjectImportResult> => {
  await initialize();
  try {
    const created = await createProject({
      name: `${exportData.project.name} (imported)`,
      description: exportData.project.description,
      rootPath: exportData.project.rootPath,
      settings: exportData.project.settings,
    });
    // In a real implementation, this would send data to Brain API;
    // for now only the counts of the payload are reported.
    return {
      success: true,
      project: created,
      imported: {
        concepts: exportData.concepts.length,
        memories: exportData.memories.length,
        relationships: exportData.relationships.length,
      },
      errors: [],
    };
  } catch (error) {
    return {
      success: false,
      project: exportData.project,
      imported: { concepts: 0, memories: 0, relationships: 0 },
      errors: [error instanceof Error ? error.message : "Import failed"],
    };
  }
};
/** Settings for a project, or undefined when the id is unknown. */
export const getProjectSettings = async (projectId: number): Promise<BrainProjectSettings | undefined> => {
  await initialize();
  return state.projects.get(projectId)?.settings;
};
/** Merge partial settings into a project and return the merged settings. */
export const updateProjectSettings = async (
  projectId: number,
  settings: Partial<BrainProjectSettings>
): Promise<BrainProjectSettings> => {
  const updated = await updateProject(projectId, { settings });
  return updated.settings;
};
/** Activate the project registered at rootPath, if one exists. */
export const setActiveProjectByPath = async (rootPath: string): Promise<BrainProject | undefined> => {
  const match = await findProjectByPath(rootPath);
  if (!match) {
    return undefined;
  }
  await switchProject(match.id);
  return match;
};

View File

@@ -19,6 +19,8 @@ import {
buildCompletePrompt,
} from "@services/prompt-builder";
import { initSuggestionService } from "@services/command-suggestion-service";
import * as brainService from "@services/brain";
import { BRAIN_DISABLED } from "@constants/brain";
import { addContextFile } from "@services/chat-tui/files";
import type { ProviderName, Message } from "@/types/providers";
import type { ChatSession } from "@/types/index";
@@ -147,6 +149,39 @@ const initializeTheme = async (): Promise<void> => {
}
};
/**
* Initialize brain service and update store state
* Skipped when BRAIN_DISABLED flag is true
*/
const initializeBrain = async (): Promise<void> => {
// Skip brain initialization when disabled
if (BRAIN_DISABLED) {
appStore.setBrainStatus("disconnected");
appStore.setBrainShowBanner(false);
return;
}
try {
appStore.setBrainStatus("connecting");
const connected = await brainService.initialize();
if (connected) {
const state = brainService.getState();
appStore.setBrainStatus("connected");
appStore.setBrainUser(state.user);
appStore.setBrainCounts(state.knowledgeCount, state.memoryCount);
appStore.setBrainShowBanner(false);
} else {
appStore.setBrainStatus("disconnected");
appStore.setBrainShowBanner(true);
}
} catch {
appStore.setBrainStatus("disconnected");
appStore.setBrainShowBanner(true);
}
};
/**
* Rebuild system prompt when interaction mode changes
* Updates both the state and the first message in the conversation
@@ -178,9 +213,13 @@ export const initializeChatService = async (
const initialMode = appStore.getState().interactionMode;
const state = await createInitialState(options, initialMode);
await validateProvider(state);
await buildSystemPrompt(state, options);
await initializeTheme();
// Run provider validation and system prompt building in parallel
// These are independent and both involve async operations
await Promise.all([
validateProvider(state),
buildSystemPrompt(state, options),
initializeTheme(),
]);
const session = await initializeSession(state, options);
@@ -188,9 +227,18 @@ export const initializeChatService = async (
state.messages.push({ role: "system", content: state.systemPrompt });
}
await addInitialContextFiles(state, options.files);
await initializePermissions();
// Run these in parallel - they're independent
await Promise.all([
addInitialContextFiles(state, options.files),
initializePermissions(),
]);
initSuggestionService(process.cwd());
// Initialize brain service (non-blocking, errors silently handled)
initializeBrain().catch(() => {
// Silently fail - brain is optional
});
return { state, session };
};

View File

@@ -366,7 +366,7 @@ export const handleMessage = async (
const modeLabel = interactionMode === "ask" ? "Ask" : "Code Review";
callbacks.onLog(
"system",
`${modeLabel} mode: Read-only tools only (Ctrl+Tab to switch modes)`,
`${modeLabel} mode: Read-only tools only (Ctrl+M to switch modes)`,
);
}

View File

@@ -0,0 +1,209 @@
/**
* Confidence-based filtering service
* Filters PR review issues and agent outputs by confidence score
*/
import type {
ConfidenceScore,
ConfidenceLevel,
ConfidenceFactor,
ConfidenceFilterConfig,
FilteredResult,
ValidationResult,
ConfidenceFilterStats,
} from "@src/types/confidence-filter";
import {
CONFIDENCE_LEVELS,
DEFAULT_CONFIDENCE_FILTER_CONFIG,
} from "@src/types/confidence-filter";
import { CONFIDENCE_FILTER, CONFIDENCE_WEIGHTS } from "@src/constants/confidence-filter";
/**
 * Map a 0-100 score to the confidence level whose [min, max] range contains
 * it; falls back to "low" when no range matches.
 */
export const calculateConfidenceLevel = (score: number): ConfidenceLevel => {
  const entries = Object.entries(CONFIDENCE_LEVELS) as Array<[ConfidenceLevel, { min: number; max: number }]>;
  for (const [level, range] of entries) {
    if (score >= range.min && score <= range.max) {
      return level;
    }
  }
  return "low";
};
/**
 * Weighted average of factor scores (rounded), with the derived level.
 * A zero total weight yields a score of 0.
 */
export const calculateConfidenceScore = (factors: ReadonlyArray<ConfidenceFactor>): ConfidenceScore => {
  let totalWeight = 0;
  let weightedSum = 0;
  for (const factor of factors) {
    totalWeight += factor.weight;
    weightedSum += factor.score * factor.weight;
  }
  const value = totalWeight > 0 ? Math.round(weightedSum / totalWeight) : 0;
  return { value, level: calculateConfidenceLevel(value), factors };
};
/**
 * Build a confidence factor, clamping score to [0, 100] and weight to [0, 1].
 */
export const createConfidenceFactor = (
  name: string,
  score: number,
  weight: number,
  reason: string
): ConfidenceFactor => {
  const clamp = (value: number, lo: number, hi: number): number =>
    Math.min(hi, Math.max(lo, value));
  return {
    name,
    score: clamp(score, 0, 100),
    weight: clamp(weight, 0, 1),
    reason,
  };
};
/** Factor: fraction of expected patterns that matched, scaled to 0-100. */
export const createPatternMatchFactor = (matchCount: number, expectedCount: number): ConfidenceFactor => {
  const ratio = matchCount / Math.max(1, expectedCount);
  return createConfidenceFactor(
    "Pattern Match",
    Math.min(100, ratio * 100),
    CONFIDENCE_WEIGHTS.PATTERN_MATCH,
    `Matched ${matchCount}/${expectedCount} expected patterns`
  );
};
/** Factor: relevance of the surrounding context (0-100). */
export const createContextRelevanceFactor = (relevanceScore: number): ConfidenceFactor =>
  createConfidenceFactor(
    "Context Relevance",
    relevanceScore,
    CONFIDENCE_WEIGHTS.CONTEXT_RELEVANCE,
    `Context relevance score: ${relevanceScore}%`
  );
/** Factor: issue severity mapped onto a fixed score scale (unknown → 50). */
export const createSeverityFactor = (severity: "low" | "medium" | "high" | "critical"): ConfidenceFactor => {
  const severityScores: Record<string, number> = { low: 40, medium: 60, high: 80, critical: 95 };
  return createConfidenceFactor(
    "Severity Level",
    severityScores[severity] ?? 50,
    CONFIDENCE_WEIGHTS.SEVERITY_LEVEL,
    `Issue severity: ${severity}`
  );
};
/** Factor: confidence reported by static analysis (0-100). */
export const createCodeAnalysisFactor = (analysisScore: number): ConfidenceFactor =>
  createConfidenceFactor(
    "Code Analysis",
    analysisScore,
    CONFIDENCE_WEIGHTS.CODE_ANALYSIS,
    `Static analysis confidence: ${analysisScore}%`
  );
/** Factor: historical accuracy for similar issues (0-100). */
export const createHistoricalAccuracyFactor = (accuracy: number): ConfidenceFactor =>
  createConfidenceFactor(
    "Historical Accuracy",
    accuracy,
    CONFIDENCE_WEIGHTS.HISTORICAL_ACCURACY,
    `Historical accuracy for similar issues: ${accuracy}%`
  );
/**
 * Tag each scored item with whether it meets the configured minimum threshold.
 */
export const filterByConfidence = <T>(
  items: ReadonlyArray<{ item: T; confidence: ConfidenceScore }>,
  config: ConfidenceFilterConfig = DEFAULT_CONFIDENCE_FILTER_CONFIG
): ReadonlyArray<FilteredResult<T>> => {
  const results: Array<FilteredResult<T>> = [];
  for (const entry of items) {
    results.push({
      item: entry.item,
      confidence: entry.confidence,
      passed: entry.confidence.value >= config.minThreshold,
    });
  }
  return results;
};
/** Extract just the items that passed the confidence filter. */
export const filterPassedOnly = <T>(results: ReadonlyArray<FilteredResult<T>>): ReadonlyArray<T> => {
  const passed: T[] = [];
  for (const result of results) {
    if (result.passed) {
      passed.push(result.item);
    }
  }
  return passed;
};
/** Bucket results by their confidence level. */
export const groupByConfidenceLevel = <T>(
  results: ReadonlyArray<FilteredResult<T>>
): Record<ConfidenceLevel, ReadonlyArray<FilteredResult<T>>> => {
  const atLevel = (level: ConfidenceLevel) =>
    results.filter((r) => r.confidence.level === level);
  return {
    low: atLevel("low"),
    medium: atLevel("medium"),
    high: atLevel("high"),
    critical: atLevel("critical"),
  };
};
/**
 * Aggregate pass/fail counts, per-level counts, and the rounded average
 * confidence over a filtered result set (average is 0 for an empty set).
 */
export const calculateFilterStats = <T>(results: ReadonlyArray<FilteredResult<T>>): ConfidenceFilterStats => {
  const grouped = groupByConfidenceLevel(results);
  let passed = 0;
  let totalConfidence = 0;
  for (const result of results) {
    if (result.passed) passed += 1;
    totalConfidence += result.confidence.value;
  }
  return {
    total: results.length,
    passed,
    filtered: results.length - passed,
    byLevel: {
      low: grouped.low.length,
      medium: grouped.medium.length,
      high: grouped.high.length,
      critical: grouped.critical.length,
    },
    averageConfidence: results.length > 0 ? Math.round(totalConfidence / results.length) : 0,
  };
};
/**
 * Run an external validator over the score's factors and apply its score
 * adjustment, clamping the adjusted confidence to [0, 100].
 */
export const validateConfidence = async (
  confidence: ConfidenceScore,
  validatorFn: (factors: ReadonlyArray<ConfidenceFactor>) => Promise<{ validated: boolean; adjustment: number; notes: string }>
): Promise<ValidationResult> => {
  const { validated, adjustment, notes } = await validatorFn(confidence.factors);
  const adjusted = confidence.value + adjustment;
  return {
    validated,
    adjustedConfidence: Math.min(100, Math.max(0, adjusted)),
    validatorNotes: notes,
  };
};
/**
 * Render a confidence score as an ANSI-colored tag, optionally followed by
 * one indented line per factor.
 */
export const formatConfidenceScore = (confidence: ConfidenceScore, showFactors: boolean = false): string => {
  const levelColors: Record<ConfidenceLevel, string> = {
    low: "\x1b[90m",
    medium: "\x1b[33m",
    high: "\x1b[32m",
    critical: "\x1b[31m",
  };
  const reset = "\x1b[0m";
  const color = levelColors[confidence.level];
  const parts = [`${color}[${confidence.value}% - ${confidence.level.toUpperCase()}]${reset}`];
  if (showFactors && confidence.factors.length > 0) {
    for (const factor of confidence.factors) {
      parts.push(`  - ${factor.name}: ${factor.score}% (weight: ${factor.weight})`);
    }
  }
  return parts.join("\n");
};
/**
 * Combine two factor lists keyed by name. A name present in both lists takes
 * the additional factor's fields with its score averaged (rounded) against
 * the existing one; order is existing-first, then new names.
 */
export const mergeConfidenceFactors = (
  existing: ReadonlyArray<ConfidenceFactor>,
  additional: ReadonlyArray<ConfidenceFactor>
): ReadonlyArray<ConfidenceFactor> => {
  const merged = new Map<string, ConfidenceFactor>(existing.map((f) => [f.name, f]));
  for (const factor of additional) {
    const prior = merged.get(factor.name);
    merged.set(
      factor.name,
      prior ? { ...factor, score: Math.round((prior.score + factor.score) / 2) } : factor
    );
  }
  return [...merged.values()];
};
/**
 * Derive the effective threshold: the user preference (when given) overrides
 * the base; critical contexts relax it by 10 (bounded below) and automated
 * contexts tighten it by 10 (bounded above).
 */
export const adjustThreshold = (
  baseThreshold: number,
  context: { isCritical: boolean; isAutomated: boolean; userPreference?: number }
): number => {
  const start = context.userPreference ?? baseThreshold;
  const afterCritical = context.isCritical
    ? Math.max(CONFIDENCE_FILTER.MIN_THRESHOLD, start - 10)
    : start;
  return context.isAutomated
    ? Math.min(CONFIDENCE_FILTER.MAX_THRESHOLD, afterCritical + 10)
    : afterCritical;
};

View File

@@ -44,23 +44,41 @@ const PROVIDER_ENV_VARS: Record<Provider, string> = {
* Config state (singleton pattern using closure)
*/
let configState: Config = getDefaults();
let configLoaded = false;
let configLoadPromise: Promise<void> | null = null;
/**
* Load configuration from file
* Load configuration from file (with caching)
*/
export const loadConfig = async (): Promise<void> => {
try {
const data = await fs.readFile(FILES.config, "utf-8");
const loaded = JSON.parse(data);
// Clean up deprecated keys
delete loaded.models;
configState = { ...getDefaults(), ...loaded };
} catch {
// Config file doesn't exist or is invalid, use defaults
configState = getDefaults();
// Return cached config if already loaded
if (configLoaded) {
return;
}
// If loading is in progress, wait for it
if (configLoadPromise) {
return configLoadPromise;
}
// Start loading
configLoadPromise = (async () => {
try {
const data = await fs.readFile(FILES.config, "utf-8");
const loaded = JSON.parse(data);
// Clean up deprecated keys
delete loaded.models;
configState = { ...getDefaults(), ...loaded };
} catch {
// Config file doesn't exist or is invalid, use defaults
configState = getDefaults();
}
configLoaded = true;
})();
return configLoadPromise;
};
/**

View File

@@ -0,0 +1,209 @@
/**
* Checkpoint Handler
*
* Manages user approval checkpoints during feature development.
*/
import {
PHASE_CHECKPOINTS,
FEATURE_DEV_ERRORS,
} from "@constants/feature-dev";
import type {
FeatureDevPhase,
FeatureDevState,
Checkpoint,
CheckpointDecision,
PhaseExecutionContext,
} from "@/types/feature-dev";
/**
 * Assemble a checkpoint record for a phase from its static config and the
 * current workflow state; the suggested action always starts as "approve".
 */
export const createCheckpoint = (
  phase: FeatureDevPhase,
  state: FeatureDevState,
  details: string[],
): Checkpoint => {
  const { title, required } = PHASE_CHECKPOINTS[phase];
  return {
    phase,
    title,
    summary: buildCheckpointSummary(phase, state),
    details,
    requiresApproval: required,
    suggestedAction: "approve",
  };
};
/**
 * Build a one-line, human-readable summary for a checkpoint from the workflow
 * state accumulated in the given phase. Each phase has its own builder so the
 * summary reflects what that phase produced (requirements, files, plan steps,
 * diff stats, test results, review findings).
 */
const buildCheckpointSummary = (
  phase: FeatureDevPhase,
  state: FeatureDevState,
): string => {
  const summaryBuilders: Record<FeatureDevPhase, () => string> = {
    understand: () => {
      const reqCount = state.requirements.length;
      const clarCount = state.clarifications.length;
      return `${reqCount} requirement(s) identified, ${clarCount} clarification(s) made`;
    },
    explore: () => {
      const fileCount = state.relevantFiles.length;
      // Findings are nested per exploration result; sum across all of them.
      const findingCount = state.explorationResults.reduce(
        (sum, r) => sum + r.findings.length,
        0,
      );
      return `Found ${fileCount} relevant file(s) with ${findingCount} finding(s)`;
    },
    plan: () => {
      if (!state.plan) return "No plan created";
      const stepCount = state.plan.steps.length;
      const complexity = state.plan.estimatedComplexity;
      return `${stepCount} step(s) planned, ${complexity} complexity`;
    },
    implement: () => {
      const changeCount = state.changes.length;
      // Diff-style line totals across all changed files.
      const additions = state.changes.reduce((sum, c) => sum + c.additions, 0);
      const deletions = state.changes.reduce((sum, c) => sum + c.deletions, 0);
      return `${changeCount} file(s) changed (+${additions}/-${deletions})`;
    },
    verify: () => {
      if (!state.testResults) return "Tests not run yet";
      const { passedTests, failedTests, totalTests } = state.testResults;
      return `${passedTests}/${totalTests} tests passed, ${failedTests} failed`;
    },
    review: () => {
      const issues = state.reviewFindings.filter((f) => f.type === "issue").length;
      const suggestions = state.reviewFindings.filter(
        (f) => f.type === "suggestion",
      ).length;
      return `${issues} issue(s), ${suggestions} suggestion(s) found`;
    },
    finalize: () => {
      const changeCount = state.changes.length;
      return `Ready to commit ${changeCount} file change(s)`;
    },
  };
  return summaryBuilders[phase]();
};
/** Whether the given phase's checkpoint is mandatory per config. */
export const requiresCheckpoint = (phase: FeatureDevPhase): boolean =>
  PHASE_CHECKPOINTS[phase].required;
/**
 * Ask the context's checkpoint handler for a decision and record it in the
 * workflow state. Without a handler, optional checkpoints auto-approve and
 * required ones throw.
 */
export const requestApproval = async (
  checkpoint: Checkpoint,
  ctx: PhaseExecutionContext,
): Promise<{ decision: CheckpointDecision; feedback?: string }> => {
  if (!ctx.onCheckpoint) {
    // No handler: a required checkpoint cannot be satisfied, others pass.
    if (checkpoint.requiresApproval) {
      throw new Error(FEATURE_DEV_ERRORS.CHECKPOINT_REQUIRED(checkpoint.phase));
    }
    return { decision: "approve" };
  }
  const result = await ctx.onCheckpoint(checkpoint);
  // Keep an audit trail of every decision in the workflow state.
  ctx.state.checkpoints.push({
    checkpoint,
    decision: result.decision,
    feedback: result.feedback,
    timestamp: Date.now(),
  });
  return result;
};
/**
 * Translate a checkpoint decision into whether the workflow should proceed,
 * plus an optional action tag for the caller.
 */
export const processCheckpointDecision = (
  decision: CheckpointDecision,
  _feedback?: string,
): { proceed: boolean; action?: string } => {
  const outcomes: Record<
    CheckpointDecision,
    () => { proceed: boolean; action?: string }
  > = {
    approve: () => ({ proceed: true }),
    skip: () => ({ proceed: true, action: "skipped" }),
    reject: () => ({ proceed: false, action: "rejected" }),
    modify: () => ({ proceed: false, action: "modify" }),
    abort: () => ({ proceed: false, action: "aborted" }),
  };
  return outcomes[decision]();
};
/**
 * Render a checkpoint as a markdown document for display to the user.
 */
export const formatCheckpoint = (checkpoint: Checkpoint): string => {
  const out: string[] = [
    `## ${checkpoint.title}`,
    "",
    `**Phase:** ${checkpoint.phase}`,
    `**Summary:** ${checkpoint.summary}`,
    "",
  ];
  if (checkpoint.details.length > 0) {
    out.push("### Details");
    out.push(...checkpoint.details.map((detail) => `- ${detail}`));
    out.push("");
  }
  if (checkpoint.requiresApproval) {
    out.push("*This checkpoint requires your approval to proceed.*");
  }
  return out.join("\n");
};
/** All recorded checkpoint decisions belonging to the given phase. */
export const getPhaseCheckpoints = (
  state: FeatureDevState,
  phase: FeatureDevPhase,
): Array<{
  checkpoint: Checkpoint;
  decision: CheckpointDecision;
  feedback?: string;
  timestamp: number;
}> => state.checkpoints.filter((entry) => entry.checkpoint.phase === phase);
/** True when the phase has at least one "approve" or "skip" decision. */
export const wasPhaseApproved = (
  state: FeatureDevState,
  phase: FeatureDevPhase,
): boolean =>
  getPhaseCheckpoints(state, phase).some(
    (entry) => entry.decision === "approve" || entry.decision === "skip",
  );

View File

@@ -0,0 +1,292 @@
/**
* Context Builder
*
* Builds context for each phase of feature development.
*/
import {
PHASE_PROMPTS,
PHASE_DESCRIPTIONS,
} from "@constants/feature-dev";
import type {
FeatureDevPhase,
FeatureDevState,
} from "@/types/feature-dev";
/**
 * Compose the full prompt/context document for a phase: header with goal,
 * phase instructions, the user's original feature request, and (when the
 * accumulated state has anything to show) a "Current State" section.
 */
export const buildPhaseContext = (
  phase: FeatureDevPhase,
  state: FeatureDevState,
  userRequest: string,
): string => {
  const sections: string[] = [
    `# Feature Development: ${phase.toUpperCase()} Phase`,
    "",
    `**Goal:** ${PHASE_DESCRIPTIONS[phase]}`,
    "",
    "## Instructions",
    PHASE_PROMPTS[phase],
    "",
    "## Feature Request",
    userRequest,
    "",
  ];
  const stateContext = buildStateContext(phase, state);
  if (stateContext) {
    sections.push("## Current State", stateContext, "");
  }
  return sections.join("\n");
};
/**
 * Build the phase-specific "Current State" markdown from accumulated workflow
 * results, or null when there is nothing relevant to show for that phase.
 * Each phase only surfaces the state produced by earlier phases that it
 * actually needs (e.g. `implement` shows the approved plan, `verify` shows
 * the changed files).
 */
const buildStateContext = (
  phase: FeatureDevPhase,
  state: FeatureDevState,
): string | null => {
  const contextBuilders: Record<FeatureDevPhase, () => string | null> = {
    understand: () => null, // No prior context
    explore: () => {
      if (state.requirements.length === 0) return null;
      const lines: string[] = [];
      lines.push("### Understood Requirements");
      for (const req of state.requirements) {
        lines.push(`- ${req}`);
      }
      if (state.clarifications.length > 0) {
        lines.push("");
        lines.push("### Clarifications");
        for (const c of state.clarifications) {
          lines.push(`Q: ${c.question}`);
          lines.push(`A: ${c.answer}`);
        }
      }
      return lines.join("\n");
    },
    plan: () => {
      const lines: string[] = [];
      // Requirements
      if (state.requirements.length > 0) {
        lines.push("### Requirements");
        for (const req of state.requirements) {
          lines.push(`- ${req}`);
        }
        lines.push("");
      }
      // Exploration results — file list is capped at 10 to keep context short
      if (state.relevantFiles.length > 0) {
        lines.push("### Relevant Files Found");
        for (const file of state.relevantFiles.slice(0, 10)) {
          lines.push(`- ${file}`);
        }
        if (state.relevantFiles.length > 10) {
          lines.push(`- ... and ${state.relevantFiles.length - 10} more`);
        }
        lines.push("");
      }
      // Patterns found — deduped, capped at 5
      const patterns = state.explorationResults.flatMap((r) => r.patterns);
      if (patterns.length > 0) {
        lines.push("### Patterns to Follow");
        for (const pattern of [...new Set(patterns)].slice(0, 5)) {
          lines.push(`- ${pattern}`);
        }
        lines.push("");
      }
      return lines.length > 0 ? lines.join("\n") : null;
    },
    implement: () => {
      if (!state.plan) return null;
      const lines: string[] = [];
      lines.push("### Approved Implementation Plan");
      lines.push(`**Summary:** ${state.plan.summary}`);
      lines.push("");
      lines.push("**Steps:**");
      for (const step of state.plan.steps) {
        lines.push(`${step.order}. [${step.changeType}] ${step.file}`);
        lines.push(`   ${step.description}`);
      }
      if (state.plan.risks.length > 0) {
        lines.push("");
        lines.push("**Risks to Watch:**");
        for (const risk of state.plan.risks) {
          lines.push(`- ${risk}`);
        }
      }
      return lines.join("\n");
    },
    verify: () => {
      if (state.changes.length === 0) return null;
      const lines: string[] = [];
      lines.push("### Files Changed");
      for (const change of state.changes) {
        lines.push(
          `- ${change.path} (${change.changeType}, +${change.additions}/-${change.deletions})`,
        );
      }
      if (state.plan?.testStrategy) {
        lines.push("");
        lines.push("### Test Strategy");
        lines.push(state.plan.testStrategy);
      }
      return lines.join("\n");
    },
    review: () => {
      const lines: string[] = [];
      // Changes to review
      if (state.changes.length > 0) {
        lines.push("### Changes to Review");
        for (const change of state.changes) {
          lines.push(
            `- ${change.path} (${change.changeType}, +${change.additions}/-${change.deletions})`,
          );
        }
        lines.push("");
      }
      // Test results
      if (state.testResults) {
        lines.push("### Test Results");
        lines.push(
          `${state.testResults.passedTests}/${state.testResults.totalTests} tests passed`,
        );
        if (state.testResults.failedTests > 0) {
          lines.push("**Failures:**");
          for (const failure of state.testResults.failures) {
            lines.push(`- ${failure.testName}: ${failure.error}`);
          }
        }
        lines.push("");
      }
      return lines.length > 0 ? lines.join("\n") : null;
    },
    finalize: () => {
      const lines: string[] = [];
      // Summary of changes
      lines.push("### Summary of Changes");
      for (const change of state.changes) {
        lines.push(
          `- ${change.path} (${change.changeType}, +${change.additions}/-${change.deletions})`,
        );
      }
      lines.push("");
      // Only critical unresolved issues block finalization visibility
      const issues = state.reviewFindings.filter(
        (f) => f.type === "issue" && f.severity === "critical",
      );
      if (issues.length > 0) {
        lines.push("### Outstanding Issues");
        for (const issue of issues) {
          lines.push(`- [${issue.severity}] ${issue.message}`);
        }
        lines.push("");
      }
      // Test status
      if (state.testResults) {
        const status = state.testResults.passed ? "✓ All tests passing" : "✗ Tests failing";
        lines.push(`### Test Status: ${status}`);
      }
      return lines.join("\n");
    },
  };
  return contextBuilders[phase]();
};
/**
 * Render a markdown progress report: current phase/status plus a per-phase
 * checklist (✓ done, → in progress, ○ pending).
 */
export const buildWorkflowSummary = (state: FeatureDevState): string => {
  const allPhases: FeatureDevPhase[] = [
    "understand",
    "explore",
    "plan",
    "implement",
    "verify",
    "review",
    "finalize",
  ];
  const currentIndex = allPhases.indexOf(state.phase);
  const markerFor = (index: number): string => {
    if (index < currentIndex) return "✓";
    if (index > currentIndex) return "○";
    // Current phase: done only once its status says so
    return state.phaseStatus === "completed" ? "✓" : "→";
  };
  const out: string[] = [
    "# Feature Development Progress",
    "",
    `**Current Phase:** ${state.phase}`,
    `**Status:** ${state.phaseStatus}`,
    "",
    "## Progress",
  ];
  allPhases.forEach((phaseName, index) => {
    out.push(`${markerFor(index)} ${phaseName}`);
  });
  return out.join("\n");
};
/**
 * Flatten workflow state into scalar metrics for quick display or logging.
 * `duration` is the milliseconds elapsed since the workflow started.
 */
export const extractKeyInfo = (
  state: FeatureDevState,
): Record<string, string | number> => ({
  phase: state.phase,
  status: state.phaseStatus,
  requirementsCount: state.requirements.length,
  relevantFilesCount: state.relevantFiles.length,
  changesCount: state.changes.length,
  reviewFindingsCount: state.reviewFindings.length,
  checkpointsCount: state.checkpoints.length,
  duration: Date.now() - state.startedAt,
});

View File

@@ -0,0 +1,290 @@
/**
* Feature-Dev Workflow Service
*
* Main orchestrator for the 7-phase feature development workflow.
*/
import { PHASE_ORDER, FEATURE_DEV_CONFIG, FEATURE_DEV_ERRORS } from "@constants/feature-dev";
import {
executePhase,
validateTransition,
} from "@services/feature-dev/phase-executor";
import { buildWorkflowSummary, extractKeyInfo } from "@services/feature-dev/context-builder";
import type {
FeatureDevPhase,
FeatureDevState,
PhaseExecutionContext,
Checkpoint,
CheckpointDecision,
} from "@/types/feature-dev";
// Re-export sub-modules
export * from "@services/feature-dev/phase-executor";
export * from "@services/feature-dev/checkpoint-handler";
export * from "@services/feature-dev/context-builder";
// Active workflows storage, keyed by workflow id
const activeWorkflows = new Map<string, FeatureDevState>();
/**
 * Create a new feature development workflow in the "understand" phase and
 * register it in the active-workflow store.
 */
export const createWorkflow = (
  id: string,
  requirements: string[] = [],
): FeatureDevState => {
  const workflow: FeatureDevState = {
    id,
    phase: "understand",
    phaseStatus: "pending",
    startedAt: Date.now(),
    updatedAt: Date.now(),
    requirements,
    clarifications: [],
    explorationResults: [],
    relevantFiles: [],
    changes: [],
    reviewFindings: [],
    checkpoints: [],
  };
  activeWorkflows.set(id, workflow);
  return workflow;
};
/** Look up an active workflow by id. */
export const getWorkflow = (id: string): FeatureDevState | undefined =>
  activeWorkflows.get(id);
/**
 * Shallow-merge updates into a workflow and bump updatedAt.
 * Returns undefined when the id is unknown.
 */
export const updateWorkflow = (
  id: string,
  updates: Partial<FeatureDevState>,
): FeatureDevState | undefined => {
  const existing = activeWorkflows.get(id);
  if (!existing) {
    return undefined;
  }
  const merged = {
    ...existing,
    ...updates,
    updatedAt: Date.now(),
  };
  activeWorkflows.set(id, merged);
  return merged;
};
/** Remove a workflow from the store; true when something was deleted. */
export const deleteWorkflow = (id: string): boolean =>
  activeWorkflows.delete(id);
/**
 * Run the complete feature development workflow for a request, executing
 * phases in order until "finalize" completes, the run is aborted, or a phase
 * transition is invalid. Reuses an existing workflow with the given id, or
 * creates a fresh one.
 *
 * @param workflowId id of the workflow to run (created on demand)
 * @param userRequest the user's original feature request text
 * @param options execution context: working dir, session id, optional abort
 *   signal, progress callback, and checkpoint approval handler
 * @returns success flag, the final workflow state, and an error message on
 *   failure
 */
export const runWorkflow = async (
  workflowId: string,
  userRequest: string,
  options: {
    config?: Partial<typeof FEATURE_DEV_CONFIG>;
    workingDir: string;
    sessionId: string;
    abortSignal?: AbortSignal;
    onProgress?: (message: string) => void;
    onCheckpoint?: (checkpoint: Checkpoint) => Promise<{
      decision: CheckpointDecision;
      feedback?: string;
    }>;
  },
): Promise<{
  success: boolean;
  finalState: FeatureDevState;
  error?: string;
}> => {
  // Merge config with defaults (kept for future extensibility)
  void { ...FEATURE_DEV_CONFIG, ...options.config };
  // Get or create workflow
  let state = getWorkflow(workflowId);
  if (!state) {
    state = createWorkflow(workflowId);
  }
  // Build execution context
  const ctx: PhaseExecutionContext = {
    state,
    workingDir: options.workingDir,
    sessionId: options.sessionId,
    abortSignal: options.abortSignal,
    onProgress: options.onProgress,
    onCheckpoint: options.onCheckpoint,
  };
  // Execute phases in order until the finalize phase reports completed
  while (state.phase !== "finalize" || state.phaseStatus !== "completed") {
    // Check for abort (checked once per phase iteration)
    if (options.abortSignal?.aborted) {
      state.abortReason = "Workflow aborted by user";
      state.phaseStatus = "failed";
      return {
        success: false,
        finalState: state,
        error: FEATURE_DEV_ERRORS.WORKFLOW_ABORTED(state.abortReason),
      };
    }
    // Execute current phase
    const result = await executePhase(state.phase, ctx, userRequest);
    // Apply state updates
    if (result.stateUpdates) {
      state = updateWorkflow(workflowId, result.stateUpdates) ?? state;
      ctx.state = state;
    }
    // Handle phase result
    if (!result.success) {
      if (state.abortReason) {
        // Workflow was aborted
        return {
          success: false,
          finalState: state,
          error: result.error,
        };
      }
      // Phase needs attention (rejected, needs modification, etc.)
      // Stay in current phase and let caller handle
      // NOTE(review): this `continue` immediately re-executes the same phase
      // rather than returning to the caller — if executePhase fails the same
      // way each time and does not mutate state, this loop never terminates.
      // Confirm executePhase guarantees progress (or aborts) on repeated
      // failure.
      continue;
    }
    // Move to next phase
    if (result.nextPhase) {
      const transition = validateTransition({
        fromPhase: state.phase,
        toPhase: result.nextPhase,
      });
      if (!transition.valid) {
        return {
          success: false,
          finalState: state,
          error: transition.error,
        };
      }
      state = updateWorkflow(workflowId, {
        phase: result.nextPhase,
        phaseStatus: "pending",
      }) ?? state;
      ctx.state = state;
    } else {
      // No next phase, workflow complete
      break;
    }
  }
  return {
    success: true,
    finalState: state,
  };
};
/**
 * Build a human-readable progress summary for a workflow.
 *
 * @returns Summary text plus key metrics, or undefined for an unknown ID.
 */
export const getWorkflowProgress = (
  workflowId: string,
): { summary: string; keyInfo: Record<string, string | number> } | undefined => {
  const workflow = getWorkflow(workflowId);
  if (!workflow) return undefined;
  const summary = buildWorkflowSummary(workflow);
  const keyInfo = extractKeyInfo(workflow);
  return { summary, keyInfo };
};
/**
 * Mark a workflow as failed, recording the abort reason.
 *
 * @returns The updated state, or undefined for an unknown ID.
 */
export const abortWorkflow = (
  workflowId: string,
  reason: string,
): FeatureDevState | undefined =>
  updateWorkflow(workflowId, { phaseStatus: "failed", abortReason: reason });
/**
 * Reset a workflow back to a specific phase, wiping the data produced by
 * that phase and every later one.
 *
 * @returns The updated state, or undefined for an unknown ID.
 */
export const resetToPhase = (
  workflowId: string,
  phase: FeatureDevPhase,
): FeatureDevState | undefined => {
  const workflow = getWorkflow(workflowId);
  if (!workflow) return undefined;
  const targetIndex = PHASE_ORDER.indexOf(phase);
  const updates: Partial<FeatureDevState> = {
    phase,
    phaseStatus: "pending",
  };
  // Each entry clears the outputs of one phase; it fires when the reset
  // target is at or before that phase in PHASE_ORDER.
  const resetters: Array<[FeatureDevPhase, () => void]> = [
    ["explore", () => {
      updates.explorationResults = [];
      updates.relevantFiles = [];
    }],
    ["plan", () => {
      updates.plan = undefined;
    }],
    ["implement", () => {
      updates.changes = [];
    }],
    ["verify", () => {
      updates.testResults = undefined;
    }],
    ["review", () => {
      updates.reviewFindings = [];
    }],
    ["finalize", () => {
      updates.commitHash = undefined;
    }],
  ];
  for (const [p, clear] of resetters) {
    if (targetIndex <= PHASE_ORDER.indexOf(p)) clear();
  }
  return updateWorkflow(workflowId, updates);
};
/**
 * Snapshot every active workflow as a lightweight descriptor.
 */
export const listWorkflows = (): Array<{
  id: string;
  phase: FeatureDevPhase;
  status: string;
  startedAt: number;
}> =>
  [...activeWorkflows.values()].map(({ id, phase, phaseStatus, startedAt }) => ({
    id,
    phase,
    status: phaseStatus,
    startedAt,
  }));
/**
 * Generate a workflow ID of the form `fd_<timestamp>_<random base36>`.
 *
 * Uses String.prototype.slice instead of the deprecated substr.
 */
export const createWorkflowId = (): string => {
  return `fd_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
};

View File

@@ -0,0 +1,345 @@
/**
* Phase Executor
*
* Executes individual phases of the feature development workflow.
*/
import {
PHASE_ORDER,
ALLOWED_TRANSITIONS,
PHASE_TIMEOUTS,
FEATURE_DEV_ERRORS,
FEATURE_DEV_MESSAGES,
} from "@constants/feature-dev";
import {
createCheckpoint,
requiresCheckpoint,
requestApproval,
processCheckpointDecision,
} from "@services/feature-dev/checkpoint-handler";
import { buildPhaseContext } from "@services/feature-dev/context-builder";
import type {
FeatureDevPhase,
PhaseExecutionContext,
PhaseExecutionResult,
PhaseTransitionRequest,
} from "@/types/feature-dev";
/**
 * Execute a single phase
 *
 * Runs the phase body, then — when the phase requires it or the body
 * requested one — pauses for a user checkpoint. Mutates ctx.state's
 * phaseStatus as it moves through in_progress → awaiting_approval →
 * approved → completed (or failed), and mirrors the final status into
 * stateUpdates for the caller to persist.
 *
 * @returns success flag, the phase that ran, the next phase (if any),
 *          and the state updates to apply
 */
export const executePhase = async (
  phase: FeatureDevPhase,
  ctx: PhaseExecutionContext,
  userRequest: string,
): Promise<PhaseExecutionResult> => {
  // Update state to in_progress
  ctx.state.phase = phase;
  ctx.state.phaseStatus = "in_progress";
  ctx.state.updatedAt = Date.now();
  ctx.onProgress?.(FEATURE_DEV_MESSAGES.STARTING(phase));
  try {
    // Execute phase-specific logic
    const result = await executePhaseLogic(phase, ctx, userRequest);
    // Handle checkpoint if needed
    if (requiresCheckpoint(phase) || result.checkpoint) {
      // Fall back to a generic checkpoint when the phase body did not build one.
      const checkpoint =
        result.checkpoint ?? createCheckpoint(phase, ctx.state, []);
      ctx.state.phaseStatus = "awaiting_approval";
      const { decision, feedback } = await requestApproval(checkpoint, ctx);
      const { proceed, action } = processCheckpointDecision(decision, feedback);
      if (!proceed) {
        if (action === "aborted") {
          ctx.state.abortReason = feedback ?? "User aborted";
          return {
            success: false,
            phase,
            error: FEATURE_DEV_ERRORS.WORKFLOW_ABORTED(ctx.state.abortReason),
            stateUpdates: { phaseStatus: "failed" },
          };
        }
        // Rejected or modify - stay in current phase
        return {
          success: false,
          phase,
          stateUpdates: { phaseStatus: "pending" },
        };
      }
      ctx.state.phaseStatus = "approved";
    }
    // Phase completed successfully
    ctx.state.phaseStatus = "completed";
    ctx.state.updatedAt = Date.now();
    ctx.onProgress?.(FEATURE_DEV_MESSAGES.COMPLETED(phase));
    return {
      success: true,
      phase,
      nextPhase: getNextPhase(phase),
      stateUpdates: { phaseStatus: "completed", ...result.stateUpdates },
    };
  } catch (error) {
    // Any throw from the phase body or checkpoint flow marks the phase failed.
    const message = error instanceof Error ? error.message : String(error);
    ctx.state.phaseStatus = "failed";
    return {
      success: false,
      phase,
      error: FEATURE_DEV_ERRORS.PHASE_FAILED(phase, message),
      stateUpdates: { phaseStatus: "failed" },
    };
  }
};
/**
 * Dispatch to the phase-specific executor for the given phase.
 * The shared phase context is built once and handed to whichever
 * executor runs.
 */
const executePhaseLogic = async (
  phase: FeatureDevPhase,
  ctx: PhaseExecutionContext,
  userRequest: string,
): Promise<Partial<PhaseExecutionResult>> => {
  // Build context for this phase
  const phaseContext = buildPhaseContext(phase, ctx.state, userRequest);
  switch (phase) {
    case "understand":
      return executeUnderstandPhase(ctx, phaseContext);
    case "explore":
      return executeExplorePhase(ctx, phaseContext);
    case "plan":
      return executePlanPhase(ctx, phaseContext);
    case "implement":
      return executeImplementPhase(ctx, phaseContext);
    case "verify":
      return executeVerifyPhase(ctx, phaseContext);
    case "review":
      return executeReviewPhase(ctx, phaseContext);
    case "finalize":
      return executeFinalizePhase(ctx, phaseContext);
  }
};
/**
 * Understand phase: confirm requirements with the user.
 *
 * Placeholder implementation — it only emits a confirmation checkpoint;
 * request parsing and clarification gathering are not implemented here yet.
 */
const executeUnderstandPhase = async (
  ctx: PhaseExecutionContext,
  _phaseContext: string,
): Promise<Partial<PhaseExecutionResult>> => {
  const reviewItems = [
    "Review the identified requirements",
    "Provide any clarifications needed",
    "Confirm understanding is correct",
  ];
  return {
    checkpoint: createCheckpoint("understand", ctx.state, reviewItems),
    stateUpdates: {},
  };
};
/**
 * Explore phase: search the codebase for relevant patterns.
 *
 * Placeholder implementation — it reports progress but performs no
 * actual exploration yet.
 */
const executeExplorePhase = async (
  ctx: PhaseExecutionContext,
  _phaseContext: string,
): Promise<Partial<PhaseExecutionResult>> => {
  ctx.onProgress?.(FEATURE_DEV_MESSAGES.EXPLORING("relevant code patterns"));
  return { stateUpdates: {} };
};
/**
 * Plan phase: produce an implementation plan that must be approved
 * before moving on. Placeholder — only emits the approval checkpoint.
 */
const executePlanPhase = async (
  ctx: PhaseExecutionContext,
  _phaseContext: string,
): Promise<Partial<PhaseExecutionResult>> => {
  const reviewItems = [
    "Review the implementation plan",
    "Check the proposed file changes",
    "Verify the approach is correct",
    "Consider the identified risks",
  ];
  return {
    checkpoint: createCheckpoint("plan", ctx.state, reviewItems),
    stateUpdates: {},
  };
};
/**
 * Implement phase: execute each step of the approved plan.
 *
 * Throws when no plan exists. Step execution itself is still a stub —
 * only the per-step progress message is emitted.
 */
const executeImplementPhase = async (
  ctx: PhaseExecutionContext,
  _phaseContext: string,
): Promise<Partial<PhaseExecutionResult>> => {
  const plan = ctx.state.plan;
  if (!plan) {
    throw new Error(FEATURE_DEV_ERRORS.NO_PLAN);
  }
  const totalSteps = plan.steps.length;
  let step = 0;
  while (step < totalSteps) {
    ctx.onProgress?.(FEATURE_DEV_MESSAGES.IMPLEMENTING_STEP(step + 1, totalSteps));
    step += 1;
  }
  return { stateUpdates: {} };
};
/**
 * Verify phase: run the test suite (stub) and ask the user to sign off
 * on the results via a checkpoint.
 */
const executeVerifyPhase = async (
  ctx: PhaseExecutionContext,
  _phaseContext: string,
): Promise<Partial<PhaseExecutionResult>> => {
  ctx.onProgress?.(FEATURE_DEV_MESSAGES.RUNNING_TESTS);
  const reviewItems = [
    "Review test results",
    "Check for any failures",
    "Verify coverage is adequate",
  ];
  return {
    checkpoint: createCheckpoint("verify", ctx.state, reviewItems),
    stateUpdates: {},
  };
};
/**
 * Review phase: self-review of the changes (stub), followed by a
 * user checkpoint to confirm the findings were addressed.
 */
const executeReviewPhase = async (
  ctx: PhaseExecutionContext,
  _phaseContext: string,
): Promise<Partial<PhaseExecutionResult>> => {
  ctx.onProgress?.(FEATURE_DEV_MESSAGES.REVIEWING);
  const reviewItems = [
    "Review code quality findings",
    "Address any critical issues",
    "Confirm changes are ready",
  ];
  return {
    checkpoint: createCheckpoint("review", ctx.state, reviewItems),
    stateUpdates: {},
  };
};
/**
 * Finalize phase: create the commit (stub), gated behind a final
 * user-approval checkpoint.
 */
const executeFinalizePhase = async (
  ctx: PhaseExecutionContext,
  _phaseContext: string,
): Promise<Partial<PhaseExecutionResult>> => {
  ctx.onProgress?.(FEATURE_DEV_MESSAGES.FINALIZING);
  const reviewItems = [
    "Confirm commit message",
    "Verify all changes are included",
    "Approve final commit",
  ];
  return {
    checkpoint: createCheckpoint("finalize", ctx.state, reviewItems),
    stateUpdates: {},
  };
};
/**
 * Phase that follows currentPhase in PHASE_ORDER, or undefined when
 * currentPhase is the last phase or not found.
 */
export const getNextPhase = (
  currentPhase: FeatureDevPhase,
): FeatureDevPhase | undefined => {
  const idx = PHASE_ORDER.indexOf(currentPhase);
  if (idx < 0 || idx >= PHASE_ORDER.length - 1) return undefined;
  return PHASE_ORDER[idx + 1];
};
/**
 * Phase that precedes currentPhase in PHASE_ORDER, or undefined when
 * currentPhase is the first phase or not found.
 */
export const getPreviousPhase = (
  currentPhase: FeatureDevPhase,
): FeatureDevPhase | undefined => {
  const idx = PHASE_ORDER.indexOf(currentPhase);
  return idx > 0 ? PHASE_ORDER[idx - 1] : undefined;
};
/**
 * Check whether fromPhase → toPhase is an allowed transition.
 * skipValidation bypasses the check entirely.
 */
export const validateTransition = (
  request: PhaseTransitionRequest,
): { valid: boolean; error?: string } => {
  const { fromPhase, toPhase, skipValidation } = request;
  if (skipValidation) return { valid: true };
  return ALLOWED_TRANSITIONS[fromPhase].includes(toPhase)
    ? { valid: true }
    : {
        valid: false,
        error: FEATURE_DEV_ERRORS.INVALID_TRANSITION(fromPhase, toPhase),
      };
};
/** Timeout budget (ms) configured for the given phase. */
export const getPhaseTimeout = (phase: FeatureDevPhase): number =>
  PHASE_TIMEOUTS[phase];

View File

@@ -8,3 +8,4 @@ export * from "@services/github-issue-service";
export * from "@services/command-suggestion-service";
export * from "@services/learning-service";
export * from "@services/rules-service";
export * as brainService from "@services/brain";

View File

@@ -0,0 +1,225 @@
/**
* Model Routing Service
*
* Maps agent tiers to appropriate models based on task complexity.
* Following Claude Code's multi-model strategy:
* - fast: Quick screening, filtering (like Haiku)
* - balanced: Detailed analysis, general tasks (like Sonnet)
* - thorough: Complex reasoning, bug hunting (like Opus)
*/
import { getModelContextSize } from "@constants/copilot";
import type { AgentConfig } from "@/types/agent-config";
/**
 * Model tier for routing decisions
 * - fast: cheap/quick models for screening and filtering
 * - balanced: general-purpose quality at moderate cost
 * - thorough: highest quality for complex reasoning
 */
export type ModelTier = "fast" | "balanced" | "thorough";
/**
 * Model tier mapping to Copilot models
 * These are the default mappings - can be overridden by agent config.
 * Order within each tier is preference order (getModelForTier picks the
 * first entry present in the available-model list).
 */
export const MODEL_TIER_MAPPING: Record<ModelTier, string[]> = {
  // Fast tier: Low cost, quick responses (0x or 0.33x multiplier)
  fast: [
    "gpt-5-mini",
    "gpt-4o-mini",
    "claude-haiku-4.5",
    "gemini-3-flash-preview",
    "grok-code-fast-1",
  ],
  // Balanced tier: Good quality, moderate cost (1x multiplier)
  balanced: [
    "claude-sonnet-4.5",
    "claude-sonnet-4",
    "gpt-5",
    "gpt-5.1",
    "gemini-2.5-pro",
    "gpt-4.1",
  ],
  // Thorough tier: Best quality, higher cost (3x multiplier)
  thorough: [
    "claude-opus-4.5",
    "gpt-5.2-codex",
    "gpt-5.1-codex-max",
  ],
};
/**
 * Tier aliases for agent frontmatter
 * Friendly names (haiku/sonnet/opus, quick/deep, …) mapped onto tiers;
 * keys are matched case-insensitively by resolveTier.
 */
const TIER_ALIASES: Record<string, ModelTier> = {
  haiku: "fast",
  fast: "fast",
  quick: "fast",
  sonnet: "balanced",
  balanced: "balanced",
  default: "balanced",
  opus: "thorough",
  thorough: "thorough",
  deep: "thorough",
};
/**
 * Agent type to default tier mapping
 * Substring keywords that inferTierFromAgent looks for in the agent's
 * lowercased id/name.
 */
const AGENT_TYPE_TIERS: Record<string, ModelTier> = {
  explorer: "fast",
  explore: "fast",
  filter: "fast",
  screen: "fast",
  architect: "balanced",
  planner: "balanced",
  plan: "balanced",
  coder: "balanced",
  general: "balanced",
  reviewer: "balanced",
  review: "balanced",
  "code-reviewer": "balanced",
  "bug-hunter": "thorough",
  bugs: "thorough",
  security: "thorough",
  compaction: "fast",
  summary: "fast",
  title: "fast",
};
/**
 * Map a tier alias or a known model ID to its ModelTier.
 *
 * @returns The tier, or null when the string matches neither an alias
 *          nor any model listed in MODEL_TIER_MAPPING.
 */
export const resolveTier = (modelOrTier: string): ModelTier | null => {
  const needle = modelOrTier.toLowerCase();
  const aliased = TIER_ALIASES[needle];
  if (aliased) return aliased;
  const entries = Object.entries(MODEL_TIER_MAPPING) as [ModelTier, string[]][];
  const match = entries.find(([, models]) =>
    models.some((m) => m.toLowerCase() === needle),
  );
  return match ? match[0] : null;
};
/**
 * Get the best available model for a tier.
 *
 * The tier's model list is in preference order; the first entry present
 * in availableModels wins. When no entry is available (or no list is
 * given) the tier's first model is the single fallback — the original
 * duplicated this fallback in two branches.
 */
export const getModelForTier = (
  tier: ModelTier,
  availableModels?: string[],
): string => {
  const tierModels = MODEL_TIER_MAPPING[tier];
  if (availableModels && availableModels.length > 0) {
    const match = tierModels.find((model) => availableModels.includes(model));
    if (match) return match;
  }
  return tierModels[0];
};
/**
 * Guess a tier from the agent's id/name via AGENT_TYPE_TIERS keywords.
 * Falls back to "balanced" when nothing matches.
 */
export const inferTierFromAgent = (agent: AgentConfig): ModelTier => {
  const haystacks = [agent.id.toLowerCase(), agent.name.toLowerCase()];
  for (const [keyword, tier] of Object.entries(AGENT_TYPE_TIERS)) {
    if (haystacks.some((h) => h.includes(keyword))) {
      return tier;
    }
  }
  return "balanced";
};
/**
 * Resolve the model to use for an agent
 *
 * Priority:
 * 1. Explicit model in agent config (full model ID)
 * 2. Tier specified in agent config (fast/balanced/thorough)
 * 3. Inferred from agent type/name
 * 4. Default model passed in
 */
export const resolveAgentModel = (
  agent: AgentConfig,
  defaultModel: string,
  availableModels?: string[],
): { model: string; tier: ModelTier; source: string } => {
  // 1. Check explicit model in agent config
  if (agent.model) {
    const tier = resolveTier(agent.model);
    if (tier) {
      // It is a tier name/alias — pick a concrete model for that tier.
      const model = getModelForTier(tier, availableModels);
      return { model, tier, source: "agent-tier" };
    }
    // Not recognized by resolveTier, so treat it as a raw model ID.
    // resolveTier already returned null here, so the old
    // `resolveTier(agent.model) ?? "balanced"` re-check was dead code —
    // the tier is always "balanced" on this path.
    return {
      model: agent.model,
      tier: "balanced",
      source: "agent-model",
    };
  }
  // 2. Infer from agent type
  const inferredTier = inferTierFromAgent(agent);
  if (inferredTier !== "balanced") {
    const model = getModelForTier(inferredTier, availableModels);
    return { model, tier: inferredTier, source: "agent-inferred" };
  }
  // 3. Use default
  const defaultTier = resolveTier(defaultModel) ?? "balanced";
  return { model: defaultModel, tier: defaultTier, source: "default" };
};
/** Input-context window size (tokens) for a model, used in routing. */
export const getRouteContextSize = (modelId: string): number =>
  getModelContextSize(modelId).input;
/**
 * Model routing decision
 */
export interface ModelRoutingDecision {
  model: string; // resolved model ID
  tier: ModelTier; // tier the chosen model belongs to
  source: string; // how it was chosen: agent-model | agent-tier | agent-inferred | default
  contextSize: number; // input context window, in tokens
}
/**
 * Resolve an agent's model and attach its input context window size.
 */
export const routeAgent = (
  agent: AgentConfig,
  defaultModel: string,
  availableModels?: string[],
): ModelRoutingDecision => {
  const { model, tier, source } = resolveAgentModel(
    agent,
    defaultModel,
    availableModels,
  );
  return { model, tier, source, contextSize: getRouteContextSize(model) };
};

View File

@@ -0,0 +1,241 @@
/**
* Conflict Detector
*
* Detects conflicts between parallel tasks based on file paths
* and task types. Read-only tasks don't conflict with each other.
*/
import { CONFLICT_CONFIG, READ_ONLY_TASK_TYPES, MODIFYING_TASK_TYPES } from "@constants/parallel";
import type {
ParallelTask,
ConflictCheckResult,
ConflictResolution,
} from "@/types/parallel";
/**
 * Active tasks being tracked for conflicts
 * (module-level registry keyed by task ID; cleared by clearActiveTasks)
 */
const activeTasks = new Map<string, ParallelTask>();
/**
 * Register a task as active
 * Subsequent checkConflicts calls will compare new tasks against it.
 */
export const registerActiveTask = (task: ParallelTask): void => {
  activeTasks.set(task.id, task);
};
/**
 * Unregister a task when completed
 * Removes it from conflict tracking; unknown IDs are a no-op.
 */
export const unregisterActiveTask = (taskId: string): void => {
  activeTasks.delete(taskId);
};
/**
 * Clear all active tasks
 * Called when a parallel-executor run finishes or is reset.
 */
export const clearActiveTasks = (): void => {
  activeTasks.clear();
};
/** IDs of every task currently registered as active. */
export const getActiveTaskIds = (): string[] => [...activeTasks.keys()];
/**
 * Paths of taskA that overlap any path of taskB.
 *
 * A path may appear more than once in the result (once per overlapping
 * counterpart); callers dedupe or only test the length.
 */
const checkPathConflict = (
  taskA: ParallelTask,
  taskB: ParallelTask,
): string[] => {
  const left = taskA.conflictPaths ?? [];
  const right = taskB.conflictPaths ?? [];
  const overlaps: string[] = [];
  for (const a of left) {
    for (const b of right) {
      if (pathsOverlap(a, b)) {
        overlaps.push(a);
      }
    }
  }
  return overlaps;
};
/**
* Check if two paths overlap (one contains or equals the other)
*/
const pathsOverlap = (pathA: string, pathB: string): boolean => {
const normalizedA = normalizePath(pathA);
const normalizedB = normalizePath(pathB);
// Exact match
if (normalizedA === normalizedB) return true;
// One is parent of the other
if (normalizedA.startsWith(normalizedB + "/")) return true;
if (normalizedB.startsWith(normalizedA + "/")) return true;
return false;
};
/**
 * Normalize a path for comparison: backslashes → forward slashes,
 * collapse runs of slashes, strip a trailing slash.
 */
const normalizePath = (path: string): string => {
  const forward = path.replace(/\\/g, "/");
  const collapsed = forward.replace(/\/+/g, "/");
  return collapsed.replace(/\/$/, "");
};
/**
 * Whether two task types can conflict at all: read-only pairs never do;
 * any pairing that involves a modifying type can.
 */
const canTypesConflict = (typeA: string, typeB: string): boolean => {
  const bothReadOnly =
    READ_ONLY_TASK_TYPES.has(typeA) && READ_ONLY_TASK_TYPES.has(typeB);
  if (bothReadOnly) return false;
  return MODIFYING_TASK_TYPES.has(typeA) || MODIFYING_TASK_TYPES.has(typeB);
};
/**
 * Check if a task conflicts with any active tasks
 *
 * Compares the task's conflictPaths against every registered active task
 * whose type can conflict with it. Returns the conflicting task IDs, the
 * deduplicated overlapping paths, and a suggested resolution strategy.
 */
export const checkConflicts = (task: ParallelTask): ConflictCheckResult => {
  // Conflict detection can be switched off globally via config.
  if (!CONFLICT_CONFIG.ENABLE_PATH_CONFLICT) {
    return {
      hasConflict: false,
      conflictingTaskIds: [],
      conflictingPaths: [],
    };
  }
  const conflictingTaskIds: string[] = [];
  const conflictingPaths: string[] = [];
  for (const [activeId, activeTask] of activeTasks) {
    // Skip self
    if (activeId === task.id) continue;
    // Check if task types can conflict (read-only pairs never do)
    if (!canTypesConflict(task.type, activeTask.type)) continue;
    // Check path conflicts
    const pathConflicts = checkPathConflict(task, activeTask);
    if (pathConflicts.length > 0) {
      conflictingTaskIds.push(activeId);
      conflictingPaths.push(...pathConflicts);
    }
  }
  const hasConflict = conflictingTaskIds.length > 0;
  // Suggest resolution
  const resolution = hasConflict ? suggestResolution(task, conflictingTaskIds) : undefined;
  return {
    hasConflict,
    conflictingTaskIds,
    // Set dedupes paths reported once per overlapping counterpart.
    conflictingPaths: [...new Set(conflictingPaths)],
    resolution,
  };
};
/**
 * Pick a resolution strategy: read-only tasks wait; a critical task that
 * outranks every conflicting task cancels them; everything else waits.
 */
const suggestResolution = (
  task: ParallelTask,
  conflictingTaskIds: string[],
): ConflictResolution => {
  if (READ_ONLY_TASK_TYPES.has(task.type)) {
    return "wait";
  }
  const others: ParallelTask[] = [];
  for (const id of conflictingTaskIds) {
    const candidate = activeTasks.get(id);
    if (candidate) others.push(candidate);
  }
  const taskValue = getPriorityValue(task.priority);
  const outranksAll = others.every(
    (other) => getPriorityValue(other.priority) < taskValue,
  );
  return outranksAll && task.priority === "critical" ? "cancel" : "wait";
};
/**
 * Numeric weight for a task priority; unknown values count as normal (50).
 */
const getPriorityValue = (priority: string): number => {
  switch (priority) {
    case "critical":
      return 100;
    case "high":
      return 75;
    case "normal":
      return 50;
    case "low":
      return 25;
    default:
      return 50;
  }
};
/**
 * Poll (every 100 ms) until none of the given task IDs is still active,
 * or the timeout elapses.
 *
 * @returns true when all conflicts cleared, false on timeout.
 */
export const waitForConflictResolution = async (
  taskIds: string[],
  timeout: number = CONFLICT_CONFIG.CONFLICT_CHECK_TIMEOUT_MS,
): Promise<boolean> => {
  const deadline = Date.now() + timeout;
  while (Date.now() < deadline) {
    if (!taskIds.some((id) => activeTasks.has(id))) {
      return true;
    }
    // Wait a bit before checking again
    await new Promise((resolve) => setTimeout(resolve, 100));
  }
  return false;
};
/**
 * IDs of active tasks whose conflict paths overlap the given task's —
 * i.e. tasks that would be affected if it were cancelled.
 */
export const getDependentTasks = (taskId: string): string[] => {
  const target = activeTasks.get(taskId);
  if (!target) return [];
  const dependents: string[] = [];
  activeTasks.forEach((candidate, id) => {
    if (id === taskId) return;
    if (checkPathConflict(candidate, target).length > 0) {
      dependents.push(id);
    }
  });
  return dependents;
};

View File

@@ -0,0 +1,351 @@
/**
* Parallel Executor
*
* Main orchestrator for parallel task execution.
* Coordinates conflict detection, resource management, and result aggregation.
*/
import { PARALLEL_DEFAULTS, PARALLEL_ERRORS, TASK_TIMEOUTS } from "@constants/parallel";
import {
registerActiveTask,
unregisterActiveTask,
checkConflicts,
waitForConflictResolution,
clearActiveTasks,
} from "@services/parallel/conflict-detector";
import {
initializeResourceManager,
acquireResources,
releaseResources,
canAcceptTask,
cancelWaitingTask,
resetResourceManager,
getResourceState,
} from "@services/parallel/resource-manager";
import { collectResults } from "@services/parallel/result-aggregator";
import type {
ParallelTask,
ParallelExecutionResult,
ParallelExecutorOptions,
AggregatedResults,
BatchExecutionRequest,
ConflictResolution,
} from "@/types/parallel";
// Re-export utilities
export * from "@services/parallel/conflict-detector";
export * from "@services/parallel/resource-manager";
export * from "@services/parallel/result-aggregator";
// ============================================================================
// Task Execution
// ============================================================================
/**
 * Execute a single task with timeout and error handling
 *
 * Races the executor against a timeout chosen by precedence: the task's
 * own timeout, then the per-type default, then the global default. Never
 * throws — failures are reported as "timeout"/"error" results — and the
 * matching onTaskStart/onTaskComplete/onTaskError callbacks are invoked.
 *
 * NOTE(review): the losing branch of Promise.race keeps running in the
 * background; the executor is not cancelled on timeout. Confirm executors
 * are safe to let run to completion.
 */
const executeTask = async <TInput, TOutput>(
  task: ParallelTask<TInput, TOutput>,
  executor: (input: TInput) => Promise<TOutput>,
  options: ParallelExecutorOptions,
): Promise<ParallelExecutionResult<TOutput>> => {
  const startedAt = Date.now();
  // Timeout precedence: task-specific > per-type > global default.
  const timeout = task.timeout ?? TASK_TIMEOUTS[task.type] ?? PARALLEL_DEFAULTS.defaultTimeout;
  try {
    // Notify task start
    options.onTaskStart?.(task);
    // Execute with timeout
    const result = await Promise.race([
      executor(task.input),
      createTimeout<TOutput>(timeout, task.id),
    ]);
    const completedAt = Date.now();
    const executionResult: ParallelExecutionResult<TOutput> = {
      taskId: task.id,
      status: "completed",
      result,
      duration: completedAt - startedAt,
      startedAt,
      completedAt,
    };
    options.onTaskComplete?.(executionResult);
    return executionResult;
  } catch (error) {
    const completedAt = Date.now();
    // TimeoutError distinguishes a timeout from an executor failure.
    const isTimeout = error instanceof TimeoutError;
    const executionResult: ParallelExecutionResult<TOutput> = {
      taskId: task.id,
      status: isTimeout ? "timeout" : "error",
      error: error instanceof Error ? error.message : String(error),
      duration: completedAt - startedAt,
      startedAt,
      completedAt,
    };
    options.onTaskError?.(task, error instanceof Error ? error : new Error(String(error)));
    return executionResult;
  }
};
/**
 * Error thrown when a task exceeds its timeout budget; executeTask uses
 * `instanceof TimeoutError` to report "timeout" rather than "error".
 */
class TimeoutError extends Error {
  constructor(taskId: string) {
    super(PARALLEL_ERRORS.TIMEOUT(taskId));
    this.name = "TimeoutError";
  }
}
/** Promise that rejects with a TimeoutError after `ms` milliseconds. */
const createTimeout = <T>(ms: number, taskId: string): Promise<T> =>
  new Promise<T>((_resolve, reject) => {
    setTimeout(() => reject(new TimeoutError(taskId)), ms);
  });
// ============================================================================
// Parallel Executor
// ============================================================================
/**
 * Execute tasks in parallel with conflict detection and resource management
 *
 * All tasks are launched from a single synchronous loop; each task's result
 * is gathered via the .then handler attached to its promise, which runs
 * before the final Promise.all continuation for that promise, so `results`
 * is complete by the time aggregation runs.
 *
 * NOTE(review): if executeWithConflictHandling rethrows (e.g. from
 * acquireResources), the rejection propagates out of Promise.all and this
 * function rejects without aggregating — confirm that is intended.
 */
export const executeParallel = async <TInput, TOutput>(
  tasks: ParallelTask<TInput, TOutput>[],
  executor: (input: TInput) => Promise<TOutput>,
  options: Partial<ParallelExecutorOptions> = {},
): Promise<AggregatedResults<TOutput>> => {
  const fullOptions: ParallelExecutorOptions = {
    limits: options.limits ?? PARALLEL_DEFAULTS,
    onTaskStart: options.onTaskStart,
    onTaskComplete: options.onTaskComplete,
    onTaskError: options.onTaskError,
    onConflict: options.onConflict,
    abortSignal: options.abortSignal,
  };
  // Initialize resource manager
  initializeResourceManager(fullOptions.limits);
  // Track results
  const results: ParallelExecutionResult<TOutput>[] = [];
  const pendingTasks = new Map<string, Promise<ParallelExecutionResult<TOutput>>>();
  // Check if executor was aborted
  const checkAbort = (): boolean => {
    return fullOptions.abortSignal?.aborted ?? false;
  };
  // Process each task
  for (const task of tasks) {
    // Tasks seen after an abort are recorded as cancelled, not executed.
    if (checkAbort()) {
      results.push({
        taskId: task.id,
        status: "cancelled",
        error: PARALLEL_ERRORS.EXECUTOR_ABORTED,
        duration: 0,
        startedAt: Date.now(),
        completedAt: Date.now(),
      });
      continue;
    }
    // Check if we can accept more tasks
    if (!canAcceptTask(fullOptions.limits)) {
      results.push({
        taskId: task.id,
        status: "error",
        error: PARALLEL_ERRORS.QUEUE_FULL,
        duration: 0,
        startedAt: Date.now(),
        completedAt: Date.now(),
      });
      continue;
    }
    // Start task execution
    const taskPromise = executeWithConflictHandling(
      task,
      executor,
      fullOptions,
    );
    pendingTasks.set(task.id, taskPromise);
    // Remove from pending when done
    taskPromise.then((result) => {
      pendingTasks.delete(task.id);
      results.push(result);
    });
  }
  // Wait for all pending tasks
  await Promise.all(pendingTasks.values());
  // Cleanup
  clearActiveTasks();
  return collectResults(results);
};
/**
 * Execute a task with conflict handling
 *
 * Acquires resource permits first, then checks for path conflicts. The
 * resolution strategy comes from the onConflict callback, falling back to
 * the detector's suggestion, then to "wait". Resources are released on
 * every exit path: success, unresolved conflict, and rethrown errors.
 */
const executeWithConflictHandling = async <TInput, TOutput>(
  task: ParallelTask<TInput, TOutput>,
  executor: (input: TInput) => Promise<TOutput>,
  options: ParallelExecutorOptions,
): Promise<ParallelExecutionResult<TOutput>> => {
  // Acquire resources
  await acquireResources(task);
  try {
    // Check for conflicts
    const conflicts = checkConflicts(task);
    if (conflicts.hasConflict) {
      // Callback override > detector suggestion > default "wait".
      const resolution = options.onConflict?.(task, conflicts) ?? conflicts.resolution ?? "wait";
      const handled = await handleConflict(task, conflicts, resolution, options);
      if (!handled.continue) {
        releaseResources(task, 0, false);
        return handled.result;
      }
    }
    // Register as active
    registerActiveTask(task);
    // Execute task
    const result = await executeTask(task, executor, options);
    // Unregister and release resources
    unregisterActiveTask(task.id);
    releaseResources(task, result.duration, result.status === "completed");
    return result;
  } catch (error) {
    // Acquisition succeeded, so the permit must be released before rethrowing.
    releaseResources(task, 0, false);
    throw error;
  }
};
/**
 * Handle task conflict based on resolution strategy
 *
 * Strategies: "wait" polls until the conflicting tasks finish (failing
 * with a conflict result on timeout); "cancel" evicts queued conflicting
 * tasks and proceeds; "merge" proceeds and defers to the result
 * aggregator; "abort" fails immediately.
 *
 * NOTE(review): when execution continues, the `{} as
 * ParallelExecutionResult<TOutput>` placeholder is never read — the caller
 * only checks `continue`. The cast is a type-level lie; consider a
 * discriminated return type instead.
 */
const handleConflict = async <TInput, TOutput>(
  task: ParallelTask<TInput, TOutput>,
  conflicts: { conflictingTaskIds: string[]; conflictingPaths: string[] },
  resolution: ConflictResolution,
  _options: ParallelExecutorOptions,
): Promise<{ continue: boolean; result: ParallelExecutionResult<TOutput> }> => {
  const createFailResult = (status: "conflict" | "cancelled", error: string) => ({
    continue: false,
    result: {
      taskId: task.id,
      status,
      error,
      duration: 0,
      startedAt: Date.now(),
      completedAt: Date.now(),
    } as ParallelExecutionResult<TOutput>,
  });
  const resolutionHandlers: Record<
    ConflictResolution,
    () => Promise<{ continue: boolean; result: ParallelExecutionResult<TOutput> }>
  > = {
    wait: async () => {
      const resolved = await waitForConflictResolution(conflicts.conflictingTaskIds);
      if (resolved) {
        return { continue: true, result: {} as ParallelExecutionResult<TOutput> };
      }
      return createFailResult("conflict", PARALLEL_ERRORS.CONFLICT(task.id, conflicts.conflictingPaths));
    },
    cancel: async () => {
      // Cancel conflicting tasks (only those still waiting in the queue)
      for (const id of conflicts.conflictingTaskIds) {
        cancelWaitingTask(id);
      }
      return { continue: true, result: {} as ParallelExecutionResult<TOutput> };
    },
    merge: async () => {
      // For merge, we continue and let result aggregator handle merging
      return { continue: true, result: {} as ParallelExecutionResult<TOutput> };
    },
    abort: async () => {
      return createFailResult("conflict", PARALLEL_ERRORS.CONFLICT(task.id, conflicts.conflictingPaths));
    },
  };
  return resolutionHandlers[resolution]();
};
// ============================================================================
// Batch Execution
// ============================================================================
/**
 * Execute a batch request by forwarding its tasks and options to
 * executeParallel.
 */
export const executeBatch = async <TInput, TOutput>(
  request: BatchExecutionRequest<TInput>,
  executor: (input: TInput) => Promise<TOutput>,
): Promise<AggregatedResults<TOutput>> => {
  const tasks = request.tasks as ParallelTask<TInput, TOutput>[];
  return executeParallel(tasks, executor, request.options);
};
// ============================================================================
// Utility Functions
// ============================================================================
/**
 * Generate a task ID of the form `task_<timestamp>_<random base36>`.
 *
 * Uses String.prototype.slice instead of the deprecated substr.
 */
export const createTaskId = (): string => {
  return `task_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
};
/**
 * Build a ParallelTask from an input payload, filling defaults for any
 * option not supplied: generated ID, "explore" type, a "default" agent,
 * and "normal" priority.
 */
export const createTask = <TInput>(
  input: TInput,
  options: Partial<ParallelTask<TInput>> = {},
): ParallelTask<TInput> => {
  const {
    id = createTaskId(),
    type = "explore",
    agent = { name: "default" },
    priority = "normal",
    conflictPaths,
    timeout,
    metadata,
  } = options;
  return { id, type, agent, input, priority, conflictPaths, timeout, metadata };
};
/**
 * Reset the parallel executor
 * Clears conflict tracking and resource-manager state; call between runs.
 */
export const resetParallelExecutor = (): void => {
  clearActiveTasks();
  resetResourceManager();
};
/**
 * Get execution statistics
 * (delegates to the resource manager's current state snapshot)
 */
export const getExecutionStats = () => getResourceState();

View File

@@ -0,0 +1,274 @@
/**
* Resource Manager
*
* Manages concurrent task execution limits using a semaphore pattern.
* Handles task queuing, priority ordering, and rate limiting.
*/
import {
PARALLEL_DEFAULTS,
PRIORITY_WEIGHTS,
TASK_TYPE_LIMITS,
PARALLEL_ERRORS,
} from "@constants/parallel";
import type {
ParallelTask,
ResourceLimits,
ResourceState,
SemaphoreState,
TaskPriority,
} from "@/types/parallel";
// ============================================================================
// Semaphore Implementation
// ============================================================================
// A task parked in the semaphore queue together with the promise
// callbacks that admit it (resolve) or cancel it (reject).
interface WaitingTask {
  task: ParallelTask;
  resolve: () => void;
  reject: (reason: Error) => void;
}
// Counting semaphore with a priority-ordered wait queue. Permits are
// handed directly from a releasing task to the highest-priority waiter.
class Semaphore {
  // Currently available permits (always <= maxPermits).
  private permits: number;
  private readonly maxPermits: number;
  // Parked acquirers, kept sorted by descending PRIORITY_WEIGHTS.
  private waiting: WaitingTask[] = [];
  constructor(permits: number) {
    this.permits = permits;
    this.maxPermits = permits;
  }
  // Acquire a permit immediately, or park until release() admits us.
  async acquire(task: ParallelTask): Promise<void> {
    if (this.permits > 0) {
      this.permits--;
      return;
    }
    return new Promise<void>((resolve, reject) => {
      this.waiting.push({ task, resolve, reject });
      // Sort by priority (highest first)
      this.waiting.sort(
        (a, b) =>
          PRIORITY_WEIGHTS[b.task.priority] - PRIORITY_WEIGHTS[a.task.priority],
      );
    });
  }
  // Release a permit. With waiters present, the permit transfers directly
  // to the head of the queue (the count is NOT incremented); otherwise the
  // count grows, clamped to maxPermits.
  release(): void {
    if (this.waiting.length > 0) {
      const next = this.waiting.shift();
      if (next) {
        next.resolve();
      }
    } else {
      this.permits = Math.min(this.permits + 1, this.maxPermits);
    }
  }
  // Remove a parked task by ID, rejecting its pending acquire() promise.
  // Returns false when the task is not in the queue.
  cancelWaiting(taskId: string): boolean {
    const index = this.waiting.findIndex((w) => w.task.id === taskId);
    if (index === -1) return false;
    const [removed] = this.waiting.splice(index, 1);
    removed.reject(new Error(PARALLEL_ERRORS.CANCELLED(taskId)));
    return true;
  }
  // Snapshot of permit and queue counts, for monitoring.
  getState(): SemaphoreState {
    return {
      permits: this.permits,
      maxPermits: this.maxPermits,
      waiting: this.waiting.length,
    };
  }
  // Reject every parked task (used on executor abort) and empty the queue.
  clearWaiting(): void {
    for (const waiting of this.waiting) {
      waiting.reject(new Error(PARALLEL_ERRORS.EXECUTOR_ABORTED));
    }
    this.waiting = [];
  }
}
// ============================================================================
// Resource Manager
// ============================================================================
// Module-level singleton state, (re)created by initializeResourceManager().
// Global semaphore bounding total concurrent tasks; null until initialized.
let globalSemaphore: Semaphore | null = null;
// Per-task-type semaphores keyed by type name (built from TASK_TYPE_LIMITS).
const taskTypeSemaphores = new Map<string, Semaphore>();
// Running counters exposed (as a copy) via getResourceState().
let resourceState: ResourceState = {
  activeTasks: 0,
  queuedTasks: 0,
  completedTasks: 0,
  failedTasks: 0,
  totalDuration: 0,
};
/**
 * Initialize resource manager with limits
 *
 * Rebuilds the global semaphore and the per-type semaphores, and zeroes all
 * counters. Safe to call repeatedly; each call discards previous state.
 */
export const initializeResourceManager = (
  limits: ResourceLimits = PARALLEL_DEFAULTS,
): void => {
  globalSemaphore = new Semaphore(limits.maxConcurrentTasks);

  // One semaphore per task type, sized from the configured limits.
  taskTypeSemaphores.clear();
  for (const [taskType, typeLimit] of Object.entries(TASK_TYPE_LIMITS)) {
    taskTypeSemaphores.set(taskType, new Semaphore(typeLimit));
  }

  // Start counting from a clean slate.
  resourceState = {
    activeTasks: 0,
    queuedTasks: 0,
    completedTasks: 0,
    failedTasks: 0,
    totalDuration: 0,
  };
};
/**
 * Acquire resources for a task
 *
 * Waits for a global permit and then (if one exists for the task's type) a
 * type-specific permit, keeping the queued/active counters in sync.
 *
 * Fix: if acquiring the type-specific permit fails (e.g. the waiter is
 * rejected by clearWaiting/resetResourceManager), the already-held global
 * permit is released again instead of leaking.
 */
export const acquireResources = async (task: ParallelTask): Promise<void> => {
  if (!globalSemaphore) {
    initializeResourceManager();
  }
  resourceState.queuedTasks++;
  let globalAcquired = false;
  try {
    // Acquire global permit
    await globalSemaphore!.acquire(task);
    globalAcquired = true;
    // Acquire type-specific permit if exists
    const typeSemaphore = taskTypeSemaphores.get(task.type);
    if (typeSemaphore) {
      await typeSemaphore.acquire(task);
    }
    resourceState.queuedTasks--;
    resourceState.activeTasks++;
  } catch (error) {
    resourceState.queuedTasks--;
    // Don't leak the global permit when the type-level acquire fails.
    if (globalAcquired) {
      globalSemaphore!.release();
    }
    throw error;
  }
};
/**
* Release resources after task completion
*/
export const releaseResources = (task: ParallelTask, duration: number, success: boolean): void => {
if (!globalSemaphore) return;
// Release global permit
globalSemaphore.release();
// Release type-specific permit
const typeSemaphore = taskTypeSemaphores.get(task.type);
if (typeSemaphore) {
typeSemaphore.release();
}
// Update state
resourceState.activeTasks--;
resourceState.totalDuration += duration;
if (success) {
resourceState.completedTasks++;
} else {
resourceState.failedTasks++;
}
};
/**
 * Cancel a waiting task
 *
 * Removes the task from the global semaphore's wait queue (rejecting its
 * pending acquire) and keeps the queued counter in sync. Returns false when
 * the manager is uninitialized or the task is not waiting.
 */
export const cancelWaitingTask = (taskId: string): boolean => {
  const semaphore = globalSemaphore;
  if (!semaphore) return false;
  if (!semaphore.cancelWaiting(taskId)) return false;
  resourceState.queuedTasks--;
  return true;
};
/**
 * Get current resource state
 *
 * Returns a shallow copy so callers cannot mutate the internal counters.
 */
export const getResourceState = (): ResourceState => ({
  ...resourceState,
});
/**
 * Get semaphore state for a task type
 *
 * Returns null when no semaphore is registered for the given type.
 */
export const getTypeSemaphoreState = (type: string): SemaphoreState | null => {
  return taskTypeSemaphores.get(type)?.getState() ?? null;
};
/**
 * Get global semaphore state
 *
 * Returns null when the resource manager has not been initialized yet.
 */
export const getGlobalSemaphoreState = (): SemaphoreState | null => {
  return globalSemaphore?.getState() ?? null;
};
/**
 * Check if we can accept more tasks
 *
 * True while the number of active plus queued tasks is below the queue cap.
 */
export const canAcceptTask = (
  limits: ResourceLimits = PARALLEL_DEFAULTS,
): boolean => {
  return (
    resourceState.activeTasks + resourceState.queuedTasks < limits.maxQueueSize
  );
};
/**
 * Reset resource manager
 *
 * Rejects every queued waiter (global and per-type) and zeroes all counters.
 * Existing semaphores are kept; only their wait queues are flushed.
 */
export const resetResourceManager = (): void => {
  globalSemaphore?.clearWaiting();
  for (const semaphore of taskTypeSemaphores.values()) {
    semaphore.clearWaiting();
  }
  resourceState = {
    activeTasks: 0,
    queuedTasks: 0,
    completedTasks: 0,
    failedTasks: 0,
    totalDuration: 0,
  };
};
/**
* Get queue position for a task based on priority
*/
export const getQueuePosition = (priority: TaskPriority): number => {
if (!globalSemaphore) return 0;
const state = globalSemaphore.getState();
// Estimate position based on priority
// Higher priority tasks will be processed first
const priorityWeight = PRIORITY_WEIGHTS[priority];
const avgWeight =
(PRIORITY_WEIGHTS.critical +
PRIORITY_WEIGHTS.high +
PRIORITY_WEIGHTS.normal +
PRIORITY_WEIGHTS.low) /
4;
const positionFactor = avgWeight / priorityWeight;
return Math.ceil(state.waiting * positionFactor);
};

View File

@@ -0,0 +1,280 @@
/**
* Result Aggregator
*
* Merges and deduplicates results from parallel task execution.
* Supports various aggregation strategies based on task type.
*/
import { DEDUP_CONFIG } from "@constants/parallel";
import type {
ParallelExecutionResult,
AggregatedResults,
DeduplicationKey,
DeduplicationResult,
} from "@/types/parallel";
// ============================================================================
// Result Collection
// ============================================================================
/**
 * Collect results into aggregated structure
 *
 * Tallies successes, failures (errors and timeouts), cancellations, and the
 * total wall-clock duration across all task results.
 */
export const collectResults = <TOutput>(
  results: ParallelExecutionResult<TOutput>[],
): AggregatedResults<TOutput> => {
  let successful = 0;
  let failed = 0;
  let cancelled = 0;
  let totalDuration = 0;
  for (const result of results) {
    totalDuration += result.duration;
    if (result.status === "completed") successful++;
    else if (result.status === "error" || result.status === "timeout") failed++;
    else if (result.status === "cancelled") cancelled++;
  }
  return { results, successful, failed, cancelled, totalDuration };
};
// ============================================================================
// Deduplication
// ============================================================================
/**
 * Create a deduplication key from an object
 *
 * Serializes the extracted key structure to JSON so it can be compared and
 * used as a Map key.
 */
export const createDeduplicationKey = <T>(
  item: T,
  keyExtractor: (item: T) => DeduplicationKey,
): string => JSON.stringify(keyExtractor(item));
/**
 * Deduplicate results based on a key extractor
 *
 * Keeps the first item seen for each serialized key; later duplicates are
 * counted but dropped. mergedCount is reserved for future merge support and
 * is always 0 today.
 */
export const deduplicateResults = <T>(
  items: T[],
  keyExtractor: (item: T) => DeduplicationKey,
): DeduplicationResult<T> => {
  const firstByKey = new Map<string, T>();
  let duplicateCount = 0;
  const mergedCount = 0;
  for (const item of items) {
    const serialized = createDeduplicationKey(item, keyExtractor);
    if (firstByKey.has(serialized)) {
      duplicateCount += 1;
    } else {
      firstByKey.set(serialized, item);
    }
  }
  return {
    unique: [...firstByKey.values()],
    duplicateCount,
    mergedCount,
  };
};
/**
 * Deduplicate file results (by path)
 */
export const deduplicateFileResults = (
  results: Array<{ path: string; content?: string }>,
): DeduplicationResult<{ path: string; content?: string }> =>
  deduplicateResults(results, ({ path }) => ({ type: "file", path }));
/**
 * Deduplicate search results (by path and matched content)
 */
export const deduplicateSearchResults = <T extends { path: string; match?: string }>(
  results: T[],
): DeduplicationResult<T> =>
  deduplicateResults(results, ({ path, match }) => ({
    type: "search",
    path,
    content: match,
  }));
// ============================================================================
// Result Merging
// ============================================================================
/**
 * Merge multiple arrays of results into one flat array
 */
export const mergeArrayResults = <T>(arrays: T[][]): T[] => {
  const merged: T[] = [];
  for (const chunk of arrays) {
    merged.push(...chunk);
  }
  return merged;
};
/**
 * Merge object results (shallow merge; later objects win on key conflicts)
 */
export const mergeObjectResults = <T extends Record<string, unknown>>(
  objects: T[],
): T => {
  let merged = {} as T;
  for (const obj of objects) {
    merged = { ...merged, ...obj };
  }
  return merged;
};
/**
 * Merge results by priority (later results override earlier)
 *
 * Returns the payload of the most recently completed successful task, or
 * undefined when nothing completed successfully.
 */
export const mergeByPriority = <T>(
  results: ParallelExecutionResult<T>[],
): T | undefined => {
  const successful = results.filter(
    (r) => r.status === "completed" && r.result !== undefined,
  );
  if (successful.length === 0) return undefined;
  successful.sort((a, b) => a.completedAt - b.completedAt);
  return successful[successful.length - 1].result;
};
// ============================================================================
// Content Similarity
// ============================================================================
/**
 * Calculate similarity between two strings using the Jaccard index
 *
 * Tokenizes on whitespace, case-insensitively, and returns
 * |intersection| / |union| in [0, 1].
 */
const calculateSimilarity = (a: string, b: string): number => {
  if (a === b) return 1;
  if (!a || !b) return 0;
  const tokensA = new Set(a.toLowerCase().split(/\s+/));
  const tokensB = new Set(b.toLowerCase().split(/\s+/));
  let shared = 0;
  const union = new Set(tokensA);
  for (const token of tokensB) {
    if (tokensA.has(token)) shared += 1;
    union.add(token);
  }
  return shared / union.size;
};
/**
 * Find similar results based on content
 *
 * For each item, collects the LATER items whose extracted content meets the
 * similarity threshold. Items with no similar successors are omitted from
 * the returned map.
 */
export const findSimilarResults = <T>(
  items: T[],
  contentExtractor: (item: T) => string,
  threshold: number = DEDUP_CONFIG.SIMILARITY_THRESHOLD,
): Map<T, T[]> => {
  const groups = new Map<T, T[]>();
  items.forEach((item, index) => {
    const content = contentExtractor(item);
    const matches = items
      .slice(index + 1)
      .filter(
        (candidate) =>
          calculateSimilarity(content, contentExtractor(candidate)) >= threshold,
      );
    if (matches.length > 0) {
      groups.set(item, matches);
    }
  });
  return groups;
};
// ============================================================================
// Aggregation Strategies
// ============================================================================
/**
 * Aggregate results as a list (concatenate all)
 *
 * Skips failed/cancelled results and results with a falsy payload.
 */
export const aggregateAsList = <T>(
  results: ParallelExecutionResult<T[]>[],
): T[] => {
  const successfulArrays: T[][] = [];
  for (const entry of results) {
    if (entry.status === "completed" && entry.result) {
      successfulArrays.push(entry.result);
    }
  }
  return mergeArrayResults(successfulArrays);
};
/**
 * Aggregate results as a map (by key)
 *
 * Later results overwrite earlier ones that produce the same key.
 */
export const aggregateAsMap = <T extends Record<string, unknown>>(
  results: ParallelExecutionResult<T>[],
  keyExtractor: (result: T) => string,
): Map<string, T> => {
  const entries = results
    .filter((r) => r.status === "completed" && r.result)
    .map((r) => [keyExtractor(r.result!), r.result!] as [string, T]);
  return new Map(entries);
};
/**
 * Aggregate results and return first non-empty
 *
 * "First" means the earliest completion time among successful results with a
 * defined payload; undefined when there is none.
 */
export const aggregateFirstNonEmpty = <T>(
  results: ParallelExecutionResult<T>[],
): T | undefined => {
  let earliest: ParallelExecutionResult<T> | undefined;
  for (const r of results) {
    if (r.status !== "completed" || r.result === undefined) continue;
    if (earliest === undefined || r.completedAt < earliest.completedAt) {
      earliest = r;
    }
  }
  return earliest?.result;
};
/**
 * Aggregate numeric results (sum of all completed numeric payloads)
 */
export const aggregateSum = (
  results: ParallelExecutionResult<number>[],
): number => {
  let total = 0;
  for (const r of results) {
    if (r.status === "completed" && typeof r.result === "number") {
      total += r.result;
    }
  }
  return total;
};
/**
 * Aggregate boolean results (all true)
 *
 * False when there are no completed boolean results at all.
 */
export const aggregateAll = (
  results: ParallelExecutionResult<boolean>[],
): boolean => {
  const booleans = results
    .filter((r) => r.status === "completed" && typeof r.result === "boolean")
    .map((r) => r.result);
  return booleans.length > 0 && !booleans.includes(false);
};
/**
 * Aggregate boolean results (any true)
 */
export const aggregateAny = (
  results: ParallelExecutionResult<boolean>[],
): boolean => {
  for (const r of results) {
    if (r.status === "completed" && r.result === true) return true;
  }
  return false;
};

View File

@@ -0,0 +1,309 @@
/**
* Diff Parser
*
* Parses unified diff format for PR review analysis.
*/
import type { ParsedDiff, ParsedFileDiff, DiffHunk } from "@/types/pr-review";
/**
 * Diff parsing patterns
 */
const PATTERNS = {
  // "diff --git a/<old> b/<new>" — start of a per-file section in git diffs.
  FILE_HEADER: /^diff --git a\/(.+) b\/(.+)$/,
  // "--- <path>" old-side header; an optional trailing tab+metadata is dropped.
  OLD_FILE: /^--- (.+?)(?:\t.*)?$/,
  // "+++ <path>" new-side header.
  NEW_FILE: /^\+\+\+ (.+?)(?:\t.*)?$/,
  // "@@ -l[,n] +l[,n] @@ <section>" hunk header; omitted counts default to 1.
  HUNK_HEADER: /^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)$/,
  // Marker git emits for binary files instead of hunks.
  BINARY: /^Binary files .+ differ$/,
  NEW_FILE_MODE: /^new file mode \d+$/,
  DELETED_FILE_MODE: /^deleted file mode \d+$/,
  RENAME_FROM: /^rename from (.+)$/,
  RENAME_TO: /^rename to (.+)$/,
} as const;
/**
 * Parse unified diff content
 *
 * Walks the diff line by line, building one ParsedFileDiff per file section
 * with its hunks, addition/deletion counts, and flags (binary / new /
 * deleted / renamed).
 *
 * Fix: "---" / "+++" lines are treated as file headers only while NOT inside
 * a hunk. Previously an in-hunk changed line that itself starts with "--" or
 * "++" (e.g. deleting the line "-- foo") matched the header regexes and
 * clobbered the file's paths.
 *
 * Known limitation kept for compatibility: in-hunk lines starting with
 * "---"/"+++" are still excluded from the addition/deletion counts below.
 */
export const parseDiff = (diffContent: string): ParsedDiff => {
  const lines = diffContent.split("\n");
  const files: ParsedFileDiff[] = [];
  let currentFile: ParsedFileDiff | null = null;
  let currentHunk: DiffHunk | null = null;
  let lineIndex = 0;
  while (lineIndex < lines.length) {
    const line = lines[lineIndex];
    // Git diff header: closes the previous file (and its open hunk).
    const gitDiffMatch = line.match(PATTERNS.FILE_HEADER);
    if (gitDiffMatch) {
      if (currentFile) {
        if (currentHunk) {
          currentFile.hunks.push(currentHunk);
        }
        files.push(currentFile);
      }
      currentFile = createEmptyFileDiff(gitDiffMatch[1], gitDiffMatch[2]);
      currentHunk = null;
      lineIndex++;
      continue;
    }
    // Old file header — only valid outside a hunk (see note above).
    const oldFileMatch =
      currentHunk === null ? line.match(PATTERNS.OLD_FILE) : null;
    if (oldFileMatch) {
      if (!currentFile) {
        currentFile = createEmptyFileDiff("", "");
      }
      currentFile.oldPath = cleanPath(oldFileMatch[1]);
      if (currentFile.oldPath === "/dev/null") {
        currentFile.isNew = true;
      }
      lineIndex++;
      continue;
    }
    // New file header — only valid outside a hunk.
    const newFileMatch =
      currentHunk === null ? line.match(PATTERNS.NEW_FILE) : null;
    if (newFileMatch) {
      if (!currentFile) {
        currentFile = createEmptyFileDiff("", "");
      }
      currentFile.newPath = cleanPath(newFileMatch[1]);
      if (currentFile.newPath === "/dev/null") {
        currentFile.isDeleted = true;
      }
      lineIndex++;
      continue;
    }
    // Binary file marker
    if (PATTERNS.BINARY.test(line)) {
      if (currentFile) {
        currentFile.isBinary = true;
      }
      lineIndex++;
      continue;
    }
    // New file mode
    if (PATTERNS.NEW_FILE_MODE.test(line)) {
      if (currentFile) {
        currentFile.isNew = true;
      }
      lineIndex++;
      continue;
    }
    // Deleted file mode
    if (PATTERNS.DELETED_FILE_MODE.test(line)) {
      if (currentFile) {
        currentFile.isDeleted = true;
      }
      lineIndex++;
      continue;
    }
    // Rename from
    const renameFromMatch = line.match(PATTERNS.RENAME_FROM);
    if (renameFromMatch) {
      if (currentFile) {
        currentFile.isRenamed = true;
        currentFile.oldPath = cleanPath(renameFromMatch[1]);
      }
      lineIndex++;
      continue;
    }
    // Rename to
    const renameToMatch = line.match(PATTERNS.RENAME_TO);
    if (renameToMatch) {
      if (currentFile) {
        currentFile.newPath = cleanPath(renameToMatch[1]);
      }
      lineIndex++;
      continue;
    }
    // Hunk header: closes the previous hunk and starts a new one.
    const hunkMatch = line.match(PATTERNS.HUNK_HEADER);
    if (hunkMatch) {
      if (currentHunk && currentFile) {
        currentFile.hunks.push(currentHunk);
      }
      currentHunk = {
        oldStart: parseInt(hunkMatch[1], 10),
        oldLines: hunkMatch[2] ? parseInt(hunkMatch[2], 10) : 1,
        newStart: parseInt(hunkMatch[3], 10),
        newLines: hunkMatch[4] ? parseInt(hunkMatch[4], 10) : 1,
        content: line,
        additions: [],
        deletions: [],
        context: [],
      };
      lineIndex++;
      continue;
    }
    // Content lines inside the current hunk: "+" addition, "-" deletion,
    // " "/empty context. Other prefixes (e.g. "\ No newline") are skipped.
    if (currentHunk) {
      if (line.startsWith("+") && !line.startsWith("+++")) {
        currentHunk.additions.push(line.slice(1));
        if (currentFile) currentFile.additions++;
      } else if (line.startsWith("-") && !line.startsWith("---")) {
        currentHunk.deletions.push(line.slice(1));
        if (currentFile) currentFile.deletions++;
      } else if (line.startsWith(" ") || line === "") {
        currentHunk.context.push(line.slice(1) || "");
      }
    }
    lineIndex++;
  }
  // Push final hunk and file
  if (currentHunk && currentFile) {
    currentFile.hunks.push(currentHunk);
  }
  if (currentFile) {
    files.push(currentFile);
  }
  // Calculate totals
  const totalAdditions = files.reduce((sum, f) => sum + f.additions, 0);
  const totalDeletions = files.reduce((sum, f) => sum + f.deletions, 0);
  return {
    files,
    totalAdditions,
    totalDeletions,
    totalFiles: files.length,
  };
};
/**
 * Create empty file diff structure with all flags cleared
 */
const createEmptyFileDiff = (oldPath: string, newPath: string): ParsedFileDiff => ({
  oldPath: cleanPath(oldPath),
  newPath: cleanPath(newPath),
  hunks: [],
  additions: 0,
  deletions: 0,
  isBinary: false,
  isNew: false,
  isDeleted: false,
  isRenamed: false,
});
/**
 * Clean path by removing the git-style a/ or b/ prefix
 */
const cleanPath = (path: string): string => {
  if (path.startsWith("a/")) return path.slice(2);
  if (path.startsWith("b/")) return path.slice(2);
  return path;
};
/**
 * Get the effective path for a file diff
 *
 * New files only have a meaningful new path, deleted files only an old one;
 * otherwise prefer the new path and fall back to the old.
 */
export const getFilePath = (fileDiff: ParsedFileDiff): string => {
  const { isNew, isDeleted, oldPath, newPath } = fileDiff;
  if (isNew) return newPath;
  if (isDeleted) return oldPath;
  return newPath || oldPath;
};
/**
 * Filter files by pattern
 *
 * Keeps only the files whose effective path matches none of the exclude
 * glob patterns.
 */
export const filterFiles = (
  files: ParsedFileDiff[],
  excludePatterns: string[],
): ParsedFileDiff[] => {
  const isExcluded = (file: ParsedFileDiff): boolean => {
    const path = getFilePath(file);
    return excludePatterns.some((pattern) => matchPattern(path, pattern));
  };
  return files.filter((file) => !isExcluded(file));
};
/**
 * Simple glob pattern matching
 *
 * Supports "**" (matches across path separators), "*" (within one path
 * segment), and "?" (single character). All other regex metacharacters in
 * the pattern are escaped literally.
 *
 * Fixes: previously an unescaped "." made "*.ts" match e.g. "axts", and the
 * "*" inside the ".*" expansion of "**" was re-expanded by the single-"*"
 * pass, breaking cross-directory matching.
 */
const matchPattern = (path: string, pattern: string): boolean => {
  const regexPattern = pattern
    // Escape regex metacharacters, keeping the glob chars * and ? intact.
    .replace(/[.+^${}()|[\]\\]/g, "\\$&")
    // Stash "**" behind a placeholder so the single-"*" pass can't touch it.
    .replace(/\*\*/g, "\u0000")
    .replace(/\*/g, "[^/]*")
    .replace(/\?/g, ".")
    .replace(/\u0000/g, ".*");
  const regex = new RegExp(`^${regexPattern}$`);
  return regex.test(path);
};
/**
 * Get added lines with line numbers
 *
 * Line numbers are derived from each hunk's new-file start offset, counting
 * additions consecutively from the top of the hunk.
 */
export const getAddedLines = (
  fileDiff: ParsedFileDiff,
): Array<{ line: number; content: string }> => {
  return fileDiff.hunks.flatMap((hunk) =>
    hunk.additions.map((content, offset) => ({
      line: hunk.newStart + offset,
      content,
    })),
  );
};
/**
 * Get hunk context (surrounding code)
 *
 * Renders up to `contextLines` leading context lines, then every deletion
 * (prefixed "-"), every addition (prefixed "+"), then up to `contextLines`
 * trailing context lines, joined with newlines.
 */
export const getHunkContext = (
  hunk: DiffHunk,
  contextLines: number = 3,
): string => {
  const rendered = [
    ...hunk.context.slice(0, contextLines).map((ctx) => ` ${ctx}`),
    ...hunk.deletions.map((del) => `-${del}`),
    ...hunk.additions.map((add) => `+${add}`),
    ...hunk.context.slice(-contextLines).map((ctx) => ` ${ctx}`),
  ];
  return rendered.join("\n");
};
/**
 * Get diff statistics
 *
 * Flattens the parsed diff totals plus a one-line human-readable summary.
 */
export const getDiffStats = (
  diff: ParsedDiff,
): { files: number; additions: number; deletions: number; summary: string } => {
  const { totalFiles: files, totalAdditions: additions, totalDeletions: deletions } = diff;
  return {
    files,
    additions,
    deletions,
    summary: `${files} file(s), +${additions}/-${deletions}`,
  };
};

View File

@@ -0,0 +1,215 @@
/**
* PR Review Service
*
* Main orchestrator for multi-agent code review.
*/
import {
DEFAULT_REVIEW_CONFIG,
PR_REVIEW_ERRORS,
PR_REVIEW_MESSAGES,
} from "@constants/pr-review";
import { parseDiff, filterFiles, getFilePath } from "@services/pr-review/diff-parser";
import { generateReport, formatReportMarkdown } from "@services/pr-review/report-generator";
import * as securityReviewer from "@services/pr-review/reviewers/security";
import * as performanceReviewer from "@services/pr-review/reviewers/performance";
import * as logicReviewer from "@services/pr-review/reviewers/logic";
import * as styleReviewer from "@services/pr-review/reviewers/style";
import type {
PRReviewReport,
PRReviewRequest,
PRReviewConfig,
ReviewerResult,
ParsedDiff,
ReviewFileContext,
} from "@/types/pr-review";
// Re-export utilities
export * from "@services/pr-review/diff-parser";
export * from "@services/pr-review/report-generator";
// Reviewer map
// Maps reviewer name (as referenced by PRReviewConfig.reviewers[].name) to
// its module; runReviewers calls each module's reviewFile(context).
const reviewers = {
  security: securityReviewer,
  performance: performanceReviewer,
  logic: logicReviewer,
  style: styleReviewer,
} as const;
/**
* Run a complete PR review
*/
export const reviewPR = async (
diffContent: string,
request: PRReviewRequest = {},
options: {
onProgress?: (message: string) => void;
abortSignal?: AbortSignal;
} = {},
): Promise<PRReviewReport> => {
const config = { ...DEFAULT_REVIEW_CONFIG, ...request.config };
options.onProgress?.(PR_REVIEW_MESSAGES.STARTING);
// Parse diff
options.onProgress?.(PR_REVIEW_MESSAGES.PARSING_DIFF);
const diff = parseDiff(diffContent);
if (diff.files.length === 0) {
throw new Error(PR_REVIEW_ERRORS.NO_FILES);
}
// Filter files
const filteredFiles = filterFiles(diff.files, config.excludePatterns);
if (filteredFiles.length === 0) {
throw new Error(PR_REVIEW_ERRORS.EXCLUDED_ALL);
}
// Create filtered diff
const filteredDiff: ParsedDiff = {
files: filteredFiles,
totalAdditions: filteredFiles.reduce((sum, f) => sum + f.additions, 0),
totalDeletions: filteredFiles.reduce((sum, f) => sum + f.deletions, 0),
totalFiles: filteredFiles.length,
};
// Run reviewers in parallel
const reviewerResults = await runReviewers(
filteredDiff,
config,
options.onProgress,
options.abortSignal,
);
// Generate report
const report = generateReport(reviewerResults, filteredDiff, {
baseBranch: request.baseBranch ?? "main",
headBranch: request.headBranch ?? "HEAD",
commitRange: `${request.baseBranch ?? "main"}...${request.headBranch ?? "HEAD"}`,
});
options.onProgress?.(PR_REVIEW_MESSAGES.COMPLETED(report.findings.length));
return report;
};
/**
 * Run all enabled reviewers
 *
 * Executes every enabled reviewer concurrently; each reviewer visits every
 * file in the diff. A reviewer that is unknown, aborted, or throws yields a
 * ReviewerResult carrying an error message instead of failing the review.
 *
 * Fix: `findings` is now explicitly typed (it was an implicit any[] under
 * strict mode), and the redundant intermediate results array is gone.
 */
const runReviewers = async (
  diff: ParsedDiff,
  config: PRReviewConfig,
  onProgress?: (message: string) => void,
  abortSignal?: AbortSignal,
): Promise<ReviewerResult[]> => {
  const enabledReviewers = config.reviewers.filter((r) => r.enabled);
  // Run reviewers in parallel
  const promises = enabledReviewers.map(
    async (reviewerConfig): Promise<ReviewerResult> => {
      if (abortSignal?.aborted) {
        return {
          reviewer: reviewerConfig.name,
          findings: [],
          duration: 0,
          error: "Aborted",
        };
      }
      onProgress?.(PR_REVIEW_MESSAGES.REVIEWING(reviewerConfig.name));
      const startTime = Date.now();
      const reviewerModule =
        reviewers[reviewerConfig.name as keyof typeof reviewers];
      if (!reviewerModule) {
        return {
          reviewer: reviewerConfig.name,
          findings: [],
          duration: 0,
          error: `Unknown reviewer: ${reviewerConfig.name}`,
        };
      }
      try {
        const findings: ReviewerResult["findings"] = [];
        for (const fileDiff of diff.files) {
          const fileContext: ReviewFileContext = {
            path: getFilePath(fileDiff),
            diff: fileDiff,
          };
          findings.push(...reviewerModule.reviewFile(fileContext));
        }
        return {
          reviewer: reviewerConfig.name,
          findings,
          duration: Date.now() - startTime,
        };
      } catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        return {
          reviewer: reviewerConfig.name,
          findings: [],
          duration: Date.now() - startTime,
          error: message,
        };
      }
    },
  );
  return Promise.all(promises);
};
/**
 * Run a quick review (only critical checks)
 *
 * Restricts the review to the security and logic reviewers at a high
 * confidence bar (90), trading coverage for speed.
 */
export const quickReview = async (
  diffContent: string,
  options: {
    onProgress?: (message: string) => void;
  } = {},
): Promise<PRReviewReport> => {
  const criticalReviewers = ["security", "logic"] as const;
  return reviewPR(
    diffContent,
    {
      config: {
        reviewers: criticalReviewers.map((name) => ({
          name,
          type: name,
          enabled: true,
          minConfidence: 90,
        })),
      },
    },
    options,
  );
};
/**
 * Get review report as markdown
 *
 * Thin service-API wrapper around formatReportMarkdown.
 */
export const getReportMarkdown = (report: PRReviewReport): string => {
  return formatReportMarkdown(report);
};
/**
 * Create a review summary for commit messages
 *
 * One line: the star rating, followed by the critical and warning counts
 * when they are non-zero.
 */
export const createReviewSummary = (report: PRReviewReport): string => {
  const { critical, warning } = report.findingsBySeverity;
  const parts = [`Review: ${report.rating}/5 stars`];
  if (critical > 0) parts.push(`${critical} critical issue(s)`);
  if (warning > 0) parts.push(`${warning} warning(s)`);
  return parts.join(", ");
};

View File

@@ -0,0 +1,410 @@
/**
* Report Generator
*
* Aggregates findings and generates the review report.
*/
import {
DEFAULT_REVIEW_CONFIG,
SEVERITY_ICONS,
SEVERITY_LABELS,
FINDING_TYPE_LABELS,
RATING_THRESHOLDS,
RECOMMENDATION_THRESHOLDS,
PR_REVIEW_TITLES,
} from "@constants/pr-review";
import type {
PRReviewFinding,
PRReviewReport,
ReviewerResult,
ReviewRating,
ReviewRecommendation,
ReviewSeverity,
ReviewFindingType,
ParsedDiff,
} from "@/types/pr-review";
/**
* Generate a complete review report
*/
export const generateReport = (
reviewerResults: ReviewerResult[],
diff: ParsedDiff,
options: {
baseBranch: string;
headBranch: string;
commitRange: string;
},
): PRReviewReport => {
// Collect all findings
const allFindings = aggregateFindings(reviewerResults);
// Filter by confidence threshold
const findings = filterByConfidence(
allFindings,
DEFAULT_REVIEW_CONFIG.minConfidence,
);
// Limit total findings
const limitedFindings = limitFindings(
findings,
DEFAULT_REVIEW_CONFIG.maxFindings,
);
// Calculate statistics
const findingsBySeverity = countBySeverity(limitedFindings);
const findingsByType = countByType(limitedFindings);
// Calculate rating and recommendation
const rating = calculateRating(findingsBySeverity);
const recommendation = calculateRecommendation(findingsBySeverity);
// Generate summary
const summary = generateSummary(limitedFindings, rating, recommendation);
// Calculate duration
const duration = reviewerResults.reduce((sum, r) => sum + r.duration, 0);
return {
id: generateReportId(),
timestamp: Date.now(),
duration,
baseBranch: options.baseBranch,
headBranch: options.headBranch,
commitRange: options.commitRange,
filesChanged: diff.totalFiles,
additions: diff.totalAdditions,
deletions: diff.totalDeletions,
findings: limitedFindings,
findingsBySeverity,
findingsByType,
reviewerResults,
rating,
recommendation,
summary,
};
};
/**
 * Aggregate findings from all reviewers
 *
 * Flattens every reviewer's findings and orders them critical-first, with
 * ties broken alphabetically by file path.
 */
const aggregateFindings = (results: ReviewerResult[]): PRReviewFinding[] => {
  const rank: Record<ReviewSeverity, number> = {
    critical: 0,
    warning: 1,
    suggestion: 2,
    nitpick: 3,
  };
  const flattened = results.flatMap((result) => result.findings);
  return flattened.sort((a, b) => {
    const bySeverity = rank[a.severity] - rank[b.severity];
    return bySeverity !== 0 ? bySeverity : a.file.localeCompare(b.file);
  });
};
/**
 * Filter findings by confidence threshold (inclusive lower bound)
 */
const filterByConfidence = (
  findings: PRReviewFinding[],
  minConfidence: number,
): PRReviewFinding[] =>
  findings.filter(({ confidence }) => confidence >= minConfidence);
/**
* Limit total number of findings
*/
const limitFindings = (
findings: PRReviewFinding[],
maxFindings: number,
): PRReviewFinding[] => {
if (findings.length <= maxFindings) return findings;
// Prioritize critical and warning findings
const critical = findings.filter((f) => f.severity === "critical");
const warnings = findings.filter((f) => f.severity === "warning");
const suggestions = findings.filter((f) => f.severity === "suggestion");
const nitpicks = findings.filter((f) => f.severity === "nitpick");
const result: PRReviewFinding[] = [];
// Add all critical findings
result.push(...critical);
// Add warnings up to limit
const remainingWarnings = maxFindings - result.length;
result.push(...warnings.slice(0, remainingWarnings));
// Add suggestions if room
const remainingSuggestions = maxFindings - result.length;
result.push(...suggestions.slice(0, remainingSuggestions));
// Add nitpicks if room
const remainingNitpicks = maxFindings - result.length;
result.push(...nitpicks.slice(0, remainingNitpicks));
return result;
};
/**
 * Count findings by severity (all four buckets always present)
 */
const countBySeverity = (
  findings: PRReviewFinding[],
): Record<ReviewSeverity, number> => {
  return findings.reduce(
    (counts, finding) => {
      counts[finding.severity] += 1;
      return counts;
    },
    { critical: 0, warning: 0, suggestion: 0, nitpick: 0 } as Record<
      ReviewSeverity,
      number
    >,
  );
};
/**
 * Count findings by type (all buckets always present)
 */
const countByType = (
  findings: PRReviewFinding[],
): Record<ReviewFindingType, number> => {
  return findings.reduce(
    (counts, finding) => {
      counts[finding.type] += 1;
      return counts;
    },
    {
      security: 0,
      performance: 0,
      style: 0,
      logic: 0,
      documentation: 0,
      testing: 0,
    } as Record<ReviewFindingType, number>,
  );
};
/**
 * Calculate overall rating (1-5 stars)
 *
 * Returns the highest star rating whose thresholds accommodate the given
 * critical/warning counts; 1 star is the floor.
 */
const calculateRating = (
  bySeverity: Record<ReviewSeverity, number>,
): ReviewRating => {
  const candidates = [5, 4, 3, 2, 1] as const;
  const fits = (rating: (typeof candidates)[number]): boolean => {
    const threshold = RATING_THRESHOLDS[rating];
    return (
      bySeverity.critical <= threshold.maxCritical &&
      bySeverity.warning <= threshold.maxWarning
    );
  };
  return candidates.find(fits) ?? 1;
};
/**
 * Calculate recommendation
 *
 * request_changes: any critical finding.
 * approve: no criticals/warnings and few enough suggestions.
 * approve_with_suggestions: no criticals and warnings under the threshold.
 * needs_discussion: everything else.
 */
const calculateRecommendation = (
  bySeverity: Record<ReviewSeverity, number>,
): ReviewRecommendation => {
  const { critical, warning, suggestion } = bySeverity;
  if (critical > 0) {
    // Hoisted: the original checked this last, but every earlier branch
    // required critical === 0, so the outcome is identical.
    return "request_changes";
  }
  if (
    warning === 0 &&
    suggestion <= RECOMMENDATION_THRESHOLDS.approve.maxSuggestion
  ) {
    return "approve";
  }
  if (
    warning <= RECOMMENDATION_THRESHOLDS.approve_with_suggestions.maxWarning
  ) {
    return "approve_with_suggestions";
  }
  return "needs_discussion";
};
/**
* Generate summary text
*/
const generateSummary = (
findings: PRReviewFinding[],
_rating: ReviewRating,
recommendation: ReviewRecommendation,
): string => {
if (findings.length === 0) {
return "No significant issues found. Code looks good!";
}
const parts: string[] = [];
// Count by severity
const critical = findings.filter((f) => f.severity === "critical").length;
const warnings = findings.filter((f) => f.severity === "warning").length;
const suggestions = findings.filter((f) => f.severity === "suggestion").length;
if (critical > 0) {
parts.push(`${critical} critical issue(s) must be addressed`);
}
if (warnings > 0) {
parts.push(`${warnings} warning(s) should be reviewed`);
}
if (suggestions > 0) {
parts.push(`${suggestions} suggestion(s) for improvement`);
}
// Add recommendation context
const recommendationText: Record<ReviewRecommendation, string> = {
approve: "",
approve_with_suggestions:
"Changes can be merged after addressing suggestions.",
request_changes: "Critical issues must be fixed before merging.",
needs_discussion: "Some items need clarification or discussion.",
};
if (recommendationText[recommendation]) {
parts.push(recommendationText[recommendation]);
}
return parts.join(". ");
};
/**
 * Format report as markdown
 *
 * Renders the header, summary tables, star rating, recommendation, summary
 * paragraph, and each finding as a single markdown document.
 */
export const formatReportMarkdown = (report: PRReviewReport): string => {
  const lines: string[] = [];
  // Header
  lines.push(`## ${PR_REVIEW_TITLES.REPORT}`);
  lines.push("");
  // Summary stats
  // Fix: this was a double-quoted string, so the literal text
  // "### ${PR_REVIEW_TITLES.SUMMARY}" was emitted instead of the title.
  lines.push(`### ${PR_REVIEW_TITLES.SUMMARY}`);
  lines.push("");
  lines.push(`| Metric | Value |`);
  lines.push(`|--------|-------|`);
  lines.push(`| Files Changed | ${report.filesChanged} |`);
  lines.push(`| Additions | +${report.additions} |`);
  lines.push(`| Deletions | -${report.deletions} |`);
  lines.push(`| Findings | ${report.findings.length} |`);
  lines.push("");
  // Findings by severity (rows with a zero count are omitted)
  lines.push("| Severity | Count |");
  lines.push("|----------|-------|");
  for (const severity of ["critical", "warning", "suggestion", "nitpick"] as const) {
    const count = report.findingsBySeverity[severity];
    if (count > 0) {
      lines.push(
        `| ${SEVERITY_ICONS[severity]} ${SEVERITY_LABELS[severity]} | ${count} |`,
      );
    }
  }
  lines.push("");
  // Rating
  const stars = "⭐".repeat(report.rating);
  lines.push(`**Rating:** ${stars} (${report.rating}/5)`);
  lines.push("");
  // Recommendation
  const recommendationEmoji: Record<ReviewRecommendation, string> = {
    approve: "✅",
    approve_with_suggestions: "✅",
    request_changes: "🔴",
    needs_discussion: "💬",
  };
  lines.push(
    `**${PR_REVIEW_TITLES.RECOMMENDATION}:** ${recommendationEmoji[report.recommendation]} ${formatRecommendation(report.recommendation)}`,
  );
  lines.push("");
  lines.push(report.summary);
  lines.push("");
  // Findings
  if (report.findings.length > 0) {
    lines.push(`### ${PR_REVIEW_TITLES.FINDINGS}`);
    lines.push("");
    for (const finding of report.findings) {
      lines.push(formatFinding(finding));
      lines.push("");
    }
  }
  return lines.join("\n");
};
/**
 * Human-readable label for a review recommendation value.
 */
const formatRecommendation = (recommendation: ReviewRecommendation): string => {
  switch (recommendation) {
    case "approve":
      return "Approve";
    case "approve_with_suggestions":
      return "Approve with Suggestions";
    case "request_changes":
      return "Request Changes";
    case "needs_discussion":
      return "Needs Discussion";
  }
};
/**
 * Render a single review finding as a markdown section.
 *
 * The section starts with a severity/type headline, then the location,
 * optional "Issue" and "Suggestion" paragraphs, and ends with a
 * horizontal rule.
 */
const formatFinding = (finding: PRReviewFinding): string => {
  const location = finding.line
    ? `${finding.file}:${finding.line}`
    : finding.file;
  const sections: string[] = [
    `${SEVERITY_ICONS[finding.severity]} **[${SEVERITY_LABELS[finding.severity]}]** ${FINDING_TYPE_LABELS[finding.type]}: ${finding.message}`,
    "",
    `📍 \`${location}\``,
  ];
  if (finding.details) {
    sections.push("", `**Issue:** ${finding.details}`);
  }
  if (finding.suggestion) {
    sections.push("", `**Suggestion:** ${finding.suggestion}`);
  }
  sections.push("", "---");
  return sections.join("\n");
};
/**
 * Generate a unique report ID: "review_" + timestamp + random base36 suffix.
 *
 * Uses slice() instead of the deprecated String.prototype.substr().
 *
 * @returns ID of the form `review_<ms-epoch>_<up to 9 base36 chars>`.
 */
const generateReportId = (): string => {
  return `review_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
};

View File

@@ -0,0 +1,240 @@
/**
* Logic Reviewer
*
* Analyzes code for logical errors and edge cases.
*/
import { MIN_CONFIDENCE_THRESHOLD, REVIEWER_PROMPTS } from "@constants/pr-review";
import type {
PRReviewFinding,
ParsedFileDiff,
ReviewFileContext,
} from "@/types/pr-review";
/**
 * Logic-error patterns matched against each added line of a diff.
 *
 * Each entry groups one or more regexes under a shared report:
 * - `patterns`: any regex matching an added line triggers the finding
 * - `message` / `suggestion`: user-facing text for the finding
 * - `confidence` (0-100): heuristic certainty; groups below
 *   MIN_CONFIDENCE_THRESHOLD are skipped by reviewFile, and higher
 *   values map to harsher severities (see determineSeverity).
 *
 * NOTE(review): these are line-oriented regex heuristics, not a parser —
 * false positives are expected and are meant to be limited by the
 * confidence threshold and deduplication.
 */
const LOGIC_PATTERNS = {
  // Deep property chains that can throw on null/undefined.
  MISSING_NULL_CHECK: {
    patterns: [
      /\w+\.\w+\.\w+/, // Deep property access without optional chaining
      /(\w+)\[['"][^'"]+['"]\]\.\w+/, // Object property followed by method
    ],
    message: "Potential null/undefined reference",
    suggestion: "Use optional chaining (?.) or add null checks",
    confidence: 70,
  },
  // Property access after an if-check that could use ?. instead.
  OPTIONAL_CHAIN_MISSING: {
    patterns: [
      /if\s*\([^)]*\)\s*\{[^}]*\w+\./, // Variable used after if check without ?.
    ],
    message: "Consider using optional chaining",
    suggestion: "Replace conditional access with ?. operator",
    confidence: 65,
  },
  // catch blocks with an empty body (with or without a binding).
  EMPTY_CATCH: {
    patterns: [
      /catch\s*\([^)]*\)\s*\{\s*\}/,
      /catch\s*\{\s*\}/,
    ],
    message: "Empty catch block - errors silently ignored",
    suggestion: "Log the error or handle it appropriately",
    confidence: 90,
  },
  // async body containing await with no surrounding try.
  UNHANDLED_PROMISE: {
    patterns: [
      /\basync\s+\w+\s*\([^)]*\)\s*\{[^}]*(?!try)[^}]*await\s+[^}]*\}/,
    ],
    message: "Async function without try-catch",
    suggestion: "Wrap await calls in try-catch or use .catch()",
    confidence: 70,
  },
  // .then(...) chains at statement start — result never awaited.
  FLOATING_PROMISE: {
    patterns: [
      /^\s*\w+\s*\.\s*then\s*\(/m,
      /^\s*\w+\([^)]*\)\.then\s*\(/m,
    ],
    message: "Floating promise - missing await or error handling",
    suggestion: "Use await or add .catch() for error handling",
    confidence: 80,
  },
  // Raw numeric indexing without a length/bounds check.
  ARRAY_INDEX_ACCESS: {
    patterns: [
      /\[\d+\]/,
      /\[0\]/,
      /\[-1\]/,
    ],
    message: "Direct array index access without bounds check",
    suggestion: "Consider using .at() or add bounds checking",
    confidence: 60,
  },
  // Loose == / != (the negative classes exclude ===, !==, <=, >=, etc.).
  EQUALITY_TYPE_COERCION: {
    patterns: [
      /[^=!]==[^=]/,
      /[^!]!=[^=]/,
    ],
    message: "Using == instead of === (type coercion)",
    suggestion: "Use strict equality (===) to avoid type coercion bugs",
    confidence: 85,
  },
  // forEach never awaits its callback's promise.
  ASYNC_IN_FOREACH: {
    patterns: [
      /\.forEach\s*\(\s*async/,
    ],
    message: "Async callback in forEach - won't await properly",
    suggestion: "Use for...of loop or Promise.all with .map()",
    confidence: 90,
  },
  // Assignment/push through a parameter — caller's object mutated.
  MUTATING_PARAMETER: {
    patterns: [
      /function\s+\w+\s*\(\w+\)\s*\{[^}]*\w+\s*\.\s*\w+\s*=/,
      /\(\w+\)\s*=>\s*\{[^}]*\w+\s*\.\s*push/,
    ],
    message: "Mutating function parameter",
    suggestion: "Create a copy before mutating or use immutable patterns",
    confidence: 75,
  },
  // read → await → write sequence on the same variable.
  RACE_CONDITION: {
    patterns: [
      /let\s+\w+\s*=[^;]+;\s*await\s+[^;]+;\s*\w+\s*=/,
    ],
    message: "Potential race condition with shared state",
    suggestion: "Use atomic operations or proper synchronization",
    confidence: 70,
  },
  // while(true) / for(;;) loops.
  INFINITE_LOOP_RISK: {
    patterns: [
      /while\s*\(\s*true\s*\)/,
      /for\s*\(\s*;\s*;\s*\)/,
    ],
    message: "Infinite loop without clear exit condition",
    suggestion: "Ensure there's a clear break condition",
    confidence: 75,
  },
} as const;
/**
 * Run the logic review over one file's diff.
 *
 * Every added line is checked against LOGIC_PATTERNS; pattern groups whose
 * confidence is below MIN_CONFIDENCE_THRESHOLD are skipped entirely, and at
 * most one finding is produced per line per group. Near-duplicate findings
 * are collapsed before returning.
 *
 * @param fileContext - File path plus its parsed diff.
 * @returns Deduplicated logic findings for the file.
 */
export const reviewFile = (
  fileContext: ReviewFileContext,
): PRReviewFinding[] => {
  const { diff, path } = fileContext;
  const addedLines = getAllAddedLines(diff);
  const collected: PRReviewFinding[] = [];
  for (const [patternName, config] of Object.entries(LOGIC_PATTERNS)) {
    // Too speculative to report at all.
    if (config.confidence < MIN_CONFIDENCE_THRESHOLD) {
      continue;
    }
    for (const { content, lineNumber } of addedLines) {
      // One finding per line per group: any matching regex is enough.
      const matched = config.patterns.some((pattern) => pattern.test(content));
      if (!matched) {
        continue;
      }
      collected.push({
        id: generateFindingId(),
        type: "logic",
        severity: determineSeverity(config.confidence),
        file: path,
        line: lineNumber,
        message: config.message,
        details: `Pattern: ${patternName}`,
        suggestion: config.suggestion,
        confidence: config.confidence,
        reviewer: "logic",
      });
    }
  }
  // Collapse repeats of the same message at nearby lines.
  return deduplicateFindings(collected);
};
/**
 * Map a confidence score to a finding severity.
 * 90+ is critical, 80-89 is warning, anything lower is a suggestion.
 */
const determineSeverity = (
  confidence: number,
): "critical" | "warning" | "suggestion" => {
  const ladder: Array<[number, "critical" | "warning" | "suggestion"]> = [
    [90, "critical"],
    [80, "warning"],
  ];
  for (const [minimum, severity] of ladder) {
    if (confidence >= minimum) {
      return severity;
    }
  }
  return "suggestion";
};
/**
 * Flatten a parsed diff into its added lines, each paired with the
 * line number it will occupy in the new file (hunk newStart + offset).
 */
const getAllAddedLines = (
  diff: ParsedFileDiff,
): Array<{ content: string; lineNumber: number }> =>
  diff.hunks.flatMap((hunk) =>
    hunk.additions.map((content, offset) => ({
      content,
      lineNumber: hunk.newStart + offset,
    })),
  );
/**
 * Collapse findings that share the same file and message.
 *
 * The first finding for a (file, message) pair wins; a later finding with
 * the same pair is kept only when both carry line numbers more than 5
 * lines apart (treated as a genuinely different location).
 */
const deduplicateFindings = (findings: PRReviewFinding[]): PRReviewFinding[] => {
  const byKey = new Map<string, PRReviewFinding>();
  for (const candidate of findings) {
    const baseKey = `${candidate.file}:${candidate.message}`;
    const first = byKey.get(baseKey);
    if (first === undefined) {
      byKey.set(baseKey, candidate);
      continue;
    }
    if (
      candidate.line &&
      first.line &&
      Math.abs(candidate.line - first.line) > 5
    ) {
      // Far enough apart to count as a separate location — keep both.
      byKey.set(`${baseKey}:${candidate.line}`, candidate);
    }
  }
  return [...byKey.values()];
};
/**
 * Generate a unique logic-finding ID: "logic_" + timestamp + random
 * base36 suffix. Uses slice() instead of the deprecated substr().
 */
const generateFindingId = (): string => {
  return `logic_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
};
/** Prompt used to steer the model during the logic review pass. */
export const getPrompt = (): string => REVIEWER_PROMPTS.logic;

View File

@@ -0,0 +1,208 @@
/**
* Performance Reviewer
*
* Analyzes code for performance issues.
*/
import { MIN_CONFIDENCE_THRESHOLD, REVIEWER_PROMPTS } from "@constants/pr-review";
import type {
PRReviewFinding,
ParsedFileDiff,
ReviewFileContext,
} from "@/types/pr-review";
/**
 * Performance patterns matched against the joined added lines of a diff.
 *
 * Unlike the logic reviewer, these regexes are tested against the combined
 * added-line text (see reviewFile below), because several patterns span
 * multiple statements. Fields per entry:
 * - `patterns`: alternatives; the first matching regex triggers a finding
 * - `message` / `suggestion`: user-facing text
 * - `confidence` (0-100): groups below MIN_CONFIDENCE_THRESHOLD are
 *   skipped; >= 85 is reported as "warning", otherwise "suggestion"
 */
const PERFORMANCE_PATTERNS = {
  // Loop bodies containing another loop — possible quadratic behavior.
  NESTED_LOOPS: {
    patterns: [
      /for\s*\([^)]+\)\s*\{[^}]*for\s*\([^)]+\)/,
      /\.forEach\([^)]+\)[^}]*\.forEach\(/,
      /\.map\([^)]+\)[^}]*\.map\(/,
      /while\s*\([^)]+\)\s*\{[^}]*while\s*\([^)]+\)/,
    ],
    message: "Nested loops detected - potential O(n²) complexity",
    suggestion: "Consider using a Map/Set for O(1) lookups or restructuring the algorithm",
    confidence: 75,
  },
  // Linear array searches executed on every loop iteration.
  ARRAY_IN_LOOP: {
    patterns: [
      /for\s*\([^)]+\)\s*\{[^}]*\.includes\s*\(/,
      /for\s*\([^)]+\)\s*\{[^}]*\.indexOf\s*\(/,
      /\.forEach\([^)]+\)[^}]*\.includes\s*\(/,
      /\.map\([^)]+\)[^}]*\.indexOf\s*\(/,
    ],
    message: "Array search inside loop - O(n²) complexity",
    suggestion: "Convert array to Set for O(1) lookups before the loop",
    confidence: 85,
  },
  // React hooks / inline objects that defeat referential equality.
  UNNECESSARY_RERENDER: {
    patterns: [
      /useEffect\s*\(\s*\([^)]*\)\s*=>\s*\{[^}]*\},\s*\[\s*\]\s*\)/,
      /useState\s*\(\s*\{/,
      /useState\s*\(\s*\[/,
      /style\s*=\s*\{\s*\{/,
    ],
    message: "Potential unnecessary re-render in React component",
    suggestion: "Use useMemo/useCallback for objects/arrays, extract styles outside component",
    confidence: 70,
  },
  // Components that might benefit from memoization.
  MISSING_MEMO: {
    patterns: [
      /export\s+(?:default\s+)?function\s+\w+\s*\([^)]*\)\s*\{[^}]*return\s*\(/,
      /const\s+\w+\s*=\s*\([^)]*\)\s*=>\s*\{[^}]*return\s*\(/,
    ],
    message: "Component may benefit from React.memo",
    suggestion: "Consider wrapping with React.memo if props rarely change",
    confidence: 60, // Below threshold, informational only
  },
  // Awaited per-item queries inside iteration constructs.
  N_PLUS_ONE_QUERY: {
    patterns: [
      /for\s*\([^)]+\)\s*\{[^}]*await\s+.*\.(find|query|get)/,
      /\.forEach\([^)]+\)[^}]*await\s+.*\.(find|query|get)/,
      /\.map\([^)]+\)[^}]*await\s+.*\.(find|query|get)/,
    ],
    message: "Potential N+1 query problem",
    suggestion: "Use batch queries or include/join to fetch related data",
    confidence: 85,
  },
  // Long-lived subscriptions/timers; cleanup cannot be verified by regex.
  MEMORY_LEAK: {
    patterns: [
      /setInterval\s*\([^)]+\)/,
      /addEventListener\s*\([^)]+\)/,
      /subscribe\s*\([^)]+\)/,
    ],
    message: "Potential memory leak - subscription/interval without cleanup",
    suggestion: "Ensure cleanup in useEffect return or componentWillUnmount",
    confidence: 75,
  },
  // Whole-library imports known to bloat bundles.
  LARGE_BUNDLE: {
    patterns: [
      /import\s+\*\s+as\s+\w+\s+from\s+['"]lodash['"]/,
      /import\s+\w+\s+from\s+['"]moment['"]/,
      /require\s*\(\s*['"]lodash['"]\s*\)/,
    ],
    message: "Large library import may increase bundle size",
    suggestion: "Use specific imports (lodash/get) or smaller alternatives (date-fns)",
    confidence: 80,
  },
  // Blocking fs calls on the Node event loop.
  SYNC_FILE_OPERATION: {
    patterns: [
      /readFileSync\s*\(/,
      /writeFileSync\s*\(/,
      /readdirSync\s*\(/,
      /existsSync\s*\(/,
    ],
    message: "Synchronous file operation may block event loop",
    suggestion: "Use async versions (readFile, writeFile) for better performance",
    confidence: 80,
  },
} as const;
/**
 * Run the performance review over one file's diff.
 *
 * Patterns are matched against the joined added-line text (some patterns
 * span statements); at most one finding is emitted per pattern group,
 * located at the first single line that matches, or the first added line
 * as a fallback.
 *
 * @param fileContext - File path plus its parsed diff.
 * @returns Performance findings for the file.
 */
export const reviewFile = (
  fileContext: ReviewFileContext,
): PRReviewFinding[] => {
  const { diff, path } = fileContext;
  const addedLines = getAllAddedLines(diff);
  // Multi-line patterns need the additions joined back together.
  const joined = addedLines.map((entry) => entry.content).join("\n");
  const results: PRReviewFinding[] = [];
  for (const [patternName, config] of Object.entries(PERFORMANCE_PATTERNS)) {
    // Too speculative to report.
    if (config.confidence < MIN_CONFIDENCE_THRESHOLD) {
      continue;
    }
    const matchedPattern = config.patterns.find((pattern) =>
      pattern.test(joined),
    );
    if (matchedPattern === undefined) {
      continue;
    }
    results.push({
      id: generateFindingId(),
      type: "performance",
      severity: config.confidence >= 85 ? "warning" : "suggestion",
      file: path,
      line: findPatternLine(addedLines, matchedPattern),
      message: config.message,
      details: `Pattern: ${patternName}`,
      suggestion: config.suggestion,
      confidence: config.confidence,
      reviewer: "performance",
    });
  }
  return results;
};
/**
 * Collect every added line from the diff, pairing each with its
 * post-merge line number (hunk newStart plus its position in the hunk).
 */
const getAllAddedLines = (
  diff: ParsedFileDiff,
): Array<{ content: string; lineNumber: number }> => {
  const collected: Array<{ content: string; lineNumber: number }> = [];
  for (const hunk of diff.hunks) {
    hunk.additions.forEach((content, offset) => {
      collected.push({ content, lineNumber: hunk.newStart + offset });
    });
  }
  return collected;
};
/**
 * Locate the first line a pattern matches.
 *
 * Falls back to the first line's number when no single line matches
 * (the pattern may only match across joined lines); returns undefined
 * for an empty list.
 */
const findPatternLine = (
  lines: Array<{ content: string; lineNumber: number }>,
  pattern: RegExp,
): number | undefined => {
  const hit = lines.find((entry) => pattern.test(entry.content));
  if (hit !== undefined) {
    return hit.lineNumber;
  }
  return lines[0]?.lineNumber;
};
/**
 * Generate a unique performance-finding ID: "perf_" + timestamp + random
 * base36 suffix. Uses slice() instead of the deprecated substr().
 */
const generateFindingId = (): string => {
  return `perf_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
};
/** Prompt used to steer the model during the performance review pass. */
export const getPrompt = (): string => REVIEWER_PROMPTS.performance;

View File

@@ -0,0 +1,182 @@
/**
* Security Reviewer
*
* Analyzes code for security vulnerabilities.
*/
import { MIN_CONFIDENCE_THRESHOLD, REVIEWER_PROMPTS } from "@constants/pr-review";
import type {
PRReviewFinding,
ParsedFileDiff,
ReviewFileContext,
} from "@/types/pr-review";
/**
 * Security vulnerability patterns matched against each added line.
 *
 * Fields per entry:
 * - `patterns`: alternatives; the first matching regex on a line
 *   produces one finding for that line
 * - `message` / `suggestion`: user-facing text
 * - `confidence` (0-100): findings below MIN_CONFIDENCE_THRESHOLD are
 *   suppressed; >= 90 is reported as "critical", otherwise "warning"
 *
 * NOTE(review): regex heuristics only — these flag likely injection
 * sinks and cannot prove exploitability.
 */
const SECURITY_PATTERNS = {
  // Template-literal interpolation inside SQL statement strings.
  SQL_INJECTION: {
    patterns: [
      /`SELECT .* FROM .* WHERE .*\$\{/i,
      /`INSERT INTO .* VALUES.*\$\{/i,
      /`UPDATE .* SET .*\$\{/i,
      /`DELETE FROM .* WHERE .*\$\{/i,
      /query\s*\(\s*['"`].*\$\{/i,
      /execute\s*\(\s*['"`].*\$\{/i,
    ],
    message: "Potential SQL injection vulnerability",
    suggestion: "Use parameterized queries or prepared statements",
    confidence: 90,
  },
  // Unescaped HTML sinks (DOM, React, jQuery, Vue).
  XSS: {
    patterns: [
      /innerHTML\s*=\s*[^"'].*\+/,
      /dangerouslySetInnerHTML/,
      /document\.write\s*\(/,
      /\.html\s*\([^)]*\+/,
      /v-html\s*=/,
    ],
    message: "Potential XSS vulnerability",
    suggestion: "Sanitize user input before rendering or use text content",
    confidence: 85,
  },
  // Interpolated strings handed to shell-executing APIs.
  COMMAND_INJECTION: {
    patterns: [
      /exec\s*\(\s*['"`].*\$\{/,
      /spawn\s*\(\s*['"`].*\$\{/,
      /execSync\s*\(\s*['"`].*\$\{/,
      /child_process.*\$\{/,
      /\$\(.* \+ /,
    ],
    message: "Potential command injection vulnerability",
    suggestion: "Avoid string concatenation in shell commands, use argument arrays",
    confidence: 90,
  },
  // File paths built from concatenation / request data.
  PATH_TRAVERSAL: {
    patterns: [
      /readFile\s*\([^)]*\+/,
      /readFileSync\s*\([^)]*\+/,
      /fs\..*\([^)]*\+.*req\./,
      /path\.join\s*\([^)]*req\./,
    ],
    message: "Potential path traversal vulnerability",
    suggestion: "Validate and sanitize file paths, use path.normalize",
    confidence: 85,
  },
  // Credential-looking literals assigned in source.
  SECRETS_EXPOSURE: {
    patterns: [
      /api[_-]?key\s*[:=]\s*['"][^'"]+['"]/i,
      /secret\s*[:=]\s*['"][^'"]+['"]/i,
      /password\s*[:=]\s*['"][^'"]+['"]/i,
      /token\s*[:=]\s*['"][^'"]+['"]/i,
      /private[_-]?key\s*[:=]\s*['"][^'"]+['"]/i,
      /Bearer\s+[A-Za-z0-9_-]+/,
    ],
    message: "Potential hardcoded secret",
    suggestion: "Use environment variables or a secrets manager",
    confidence: 80,
  },
  // Math.random is not cryptographically secure.
  INSECURE_RANDOM: {
    patterns: [
      /Math\.random\s*\(\)/,
    ],
    message: "Insecure random number generation",
    suggestion: "Use crypto.randomBytes or crypto.getRandomValues for security-sensitive operations",
    confidence: 70,
  },
  // eval and string-form timers execute arbitrary code.
  EVAL_USAGE: {
    patterns: [
      /\beval\s*\(/,
      /new\s+Function\s*\(/,
      /setTimeout\s*\(\s*['"`]/,
      /setInterval\s*\(\s*['"`]/,
    ],
    message: "Dangerous use of eval or dynamic code execution",
    suggestion: "Avoid eval and dynamic code execution, use safer alternatives",
    confidence: 85,
  },
} as const;
/**
 * Run the security review over one file's diff.
 *
 * Every added line is matched against SECURITY_PATTERNS; at most one
 * finding is produced per line per pattern group. Confidence >= 90 is
 * reported as "critical", otherwise "warning".
 *
 * @param fileContext - File path plus its parsed diff.
 * @returns Security findings for the file.
 */
export const reviewFile = (
  fileContext: ReviewFileContext,
): PRReviewFinding[] => {
  const findings: PRReviewFinding[] = [];
  const { diff, path } = fileContext;
  // Get all added lines
  const addedLines = getAllAddedLines(diff);
  // Check each pattern group
  for (const [patternName, config] of Object.entries(SECURITY_PATTERNS)) {
    // Skip sub-threshold groups up front — consistent with the logic and
    // style reviewers, and avoids running their regexes at all. (The old
    // placement inside the innermost loop tested every regex and only
    // suppressed the push; resulting findings are identical.)
    if (config.confidence < MIN_CONFIDENCE_THRESHOLD) {
      continue;
    }
    for (const { content, lineNumber } of addedLines) {
      for (const pattern of config.patterns) {
        if (pattern.test(content)) {
          findings.push({
            id: generateFindingId(),
            type: "security",
            severity: config.confidence >= 90 ? "critical" : "warning",
            file: path,
            line: lineNumber,
            message: config.message,
            details: `Found pattern: ${patternName}`,
            suggestion: config.suggestion,
            confidence: config.confidence,
            reviewer: "security",
          });
          break; // One finding per line per pattern group
        }
      }
    }
  }
  return findings;
};
/**
 * Expand the diff into (content, lineNumber) pairs for every added line,
 * numbering lines from each hunk's newStart.
 */
const getAllAddedLines = (
  diff: ParsedFileDiff,
): Array<{ content: string; lineNumber: number }> =>
  diff.hunks.flatMap((hunk) =>
    hunk.additions.map((content, offset) => ({
      content,
      lineNumber: hunk.newStart + offset,
    })),
  );
/**
 * Generate a unique security-finding ID: "sec_" + timestamp + random
 * base36 suffix. Uses slice() instead of the deprecated substr().
 */
const generateFindingId = (): string => {
  return `sec_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
};
/** Prompt used to steer the model during the security review pass. */
export const getPrompt = (): string => REVIEWER_PROMPTS.security;

View File

@@ -0,0 +1,267 @@
/**
* Style Reviewer
*
* Analyzes code for style and consistency issues.
*/
import { MIN_CONFIDENCE_THRESHOLD, REVIEWER_PROMPTS } from "@constants/pr-review";
import type {
PRReviewFinding,
ParsedFileDiff,
ReviewFileContext,
} from "@/types/pr-review";
/**
 * Style and consistency patterns matched against each added line.
 *
 * Fields per entry:
 * - `patterns`: alternatives; any match on a line produces a finding
 * - `message` / `suggestion`: user-facing text
 * - `confidence` (0-100): groups below MIN_CONFIDENCE_THRESHOLD are
 *   skipped; >= 85 is reported as "warning", otherwise "nitpick"
 *
 * Some groups are reported at most once per file (see
 * shouldReportOncePerFile) and total repeats are capped by limitFindings.
 */
const STYLE_PATTERNS = {
  // Leftover debug logging.
  CONSOLE_LOG: {
    patterns: [
      /console\.(log|debug|info)\s*\(/,
    ],
    message: "Console statement left in code",
    suggestion: "Remove console statements before committing or use a logger",
    confidence: 85,
  },
  // TODO/FIXME/HACK markers in line or block comments.
  TODO_COMMENT: {
    patterns: [
      /\/\/\s*TODO[:\s]/i,
      /\/\/\s*FIXME[:\s]/i,
      /\/\/\s*HACK[:\s]/i,
      /\/\*\s*TODO[:\s]/i,
    ],
    message: "TODO/FIXME comment found",
    suggestion: "Address the TODO or create a tracking issue",
    confidence: 70,
  },
  // Common time/size constants written as bare literals.
  MAGIC_NUMBER: {
    patterns: [
      /(?<![a-zA-Z_])(?:86400|3600|60000|1000|24|60|365|1024|2048|4096)(?![a-zA-Z_\d])/,
    ],
    message: "Magic number - consider using a named constant",
    suggestion: "Extract to a named constant for better readability",
    confidence: 70,
  },
  // Lines beyond the 120-column limit.
  LONG_LINE: {
    patterns: [
      /.{121,}/,
    ],
    message: "Line exceeds 120 characters",
    suggestion: "Break long lines for better readability",
    confidence: 75,
  },
  // NOTE(review): this regex matches ANY quoted string, not actual
  // mixed-quote usage — relies on the once-per-file cap to limit noise.
  INCONSISTENT_QUOTES: {
    patterns: [
      /["'][^"']*["']/,
    ],
    message: "Inconsistent quote style",
    suggestion: "Use consistent quotes (single or double) throughout the file",
    confidence: 60,
  },
  // Legacy var declarations.
  VAR_DECLARATION: {
    patterns: [
      /\bvar\s+\w+/,
    ],
    message: "Using 'var' instead of 'let' or 'const'",
    suggestion: "Prefer 'const' for immutable values, 'let' for mutable",
    confidence: 85,
  },
  // Two ?s before a : — a ternary nested in a ternary.
  NESTED_TERNARY: {
    patterns: [
      /\?[^:]+\?[^:]+:/,
    ],
    message: "Nested ternary operator - hard to read",
    suggestion: "Use if-else statements or extract to a function",
    confidence: 80,
  },
  // Three-deep arrow callbacks or .then chains.
  CALLBACK_HELL: {
    patterns: [
      /\)\s*=>\s*\{[^}]*\)\s*=>\s*\{[^}]*\)\s*=>\s*\{/,
      /\.then\([^)]+\.then\([^)]+\.then\(/,
    ],
    message: "Deeply nested callbacks - callback hell",
    suggestion: "Refactor using async/await or extract functions",
    confidence: 80,
  },
  // Explicit any annotations, generics, and assertions.
  ANY_TYPE: {
    patterns: [
      /:\s*any\b/,
      /<any>/,
      /as\s+any\b/,
    ],
    message: "Using 'any' type reduces type safety",
    suggestion: "Use specific types or 'unknown' with type guards",
    confidence: 75,
  },
  // Declarations like `const x =`.
  SINGLE_LETTER_VAR: {
    patterns: [
      /\b(?:const|let|var)\s+[a-z]\s*=/,
    ],
    message: "Single-letter variable name",
    suggestion: "Use descriptive variable names for clarity",
    confidence: 65,
  },
  // Comments that start with statement keywords — likely dead code.
  COMMENTED_CODE: {
    patterns: [
      /\/\/\s*(?:const|let|var|function|if|for|while|return)\s+\w+/,
      /\/\*\s*(?:const|let|var|function|if|for|while|return)\s+\w+/,
    ],
    message: "Commented out code detected",
    suggestion: "Remove commented code - use version control for history",
    confidence: 80,
  },
  // NOTE(review): matches every named import; actual duplicate detection
  // is left to the reader — hence the low confidence and per-file cap.
  DUPLICATE_IMPORT: {
    patterns: [
      /import\s+\{[^}]+\}\s+from\s+['"]([^'"]+)['"]/,
    ],
    message: "Check for duplicate or unused imports",
    suggestion: "Consolidate imports from the same module",
    confidence: 60,
  },
} as const;
/**
 * Run the style review over one file's diff.
 *
 * Each added line is matched against STYLE_PATTERNS (groups below
 * MIN_CONFIDENCE_THRESHOLD are skipped). File-scoped groups report at
 * most once per file; the rest report per matching line. Finally,
 * repeats of the same message are capped at 3 to keep reports readable.
 *
 * @param fileContext - File path plus its parsed diff.
 * @returns Style findings for the file (capped per message).
 */
export const reviewFile = (
  fileContext: ReviewFileContext,
): PRReviewFinding[] => {
  const { diff, path } = fileContext;
  const addedLines = getAllAddedLines(diff);
  const collected: PRReviewFinding[] = [];
  for (const [patternName, config] of Object.entries(STYLE_PATTERNS)) {
    // Too speculative to report.
    if (config.confidence < MIN_CONFIDENCE_THRESHOLD) {
      continue;
    }
    const oncePerFile = shouldReportOncePerFile(patternName);
    let alreadyReported = false;
    for (const { content, lineNumber } of addedLines) {
      const matched = config.patterns.some((pattern) => pattern.test(content));
      if (!matched) {
        continue;
      }
      if (oncePerFile) {
        if (!alreadyReported) {
          collected.push(createFinding(path, lineNumber, config, patternName));
          alreadyReported = true;
        }
      } else {
        collected.push(createFinding(path, lineNumber, config, patternName));
      }
    }
  }
  // Cap repeats of the same message to avoid noisy reports.
  return limitFindings(collected, 3);
};
/**
 * Pattern names whose findings are file-scoped rather than line-scoped.
 * Hoisted to module scope so the Set is not rebuilt on every call
 * (shouldReportOncePerFile runs once per pattern group per reviewed file).
 */
const ONCE_PER_FILE_PATTERNS = new Set([
  "INCONSISTENT_QUOTES",
  "VAR_DECLARATION",
  "ANY_TYPE",
  "DUPLICATE_IMPORT",
]);
/**
 * Check if pattern should only be reported once per file.
 *
 * @param patternName - Key from STYLE_PATTERNS.
 * @returns True when the pattern should produce at most one finding per file.
 */
const shouldReportOncePerFile = (patternName: string): boolean => {
  return ONCE_PER_FILE_PATTERNS.has(patternName);
};
/**
 * Build a style finding from a matched pattern group's config.
 * Confidence >= 85 yields a "warning"; anything lower is a "nitpick".
 */
const createFinding = (
  path: string,
  lineNumber: number,
  config: { message: string; suggestion: string; confidence: number },
  patternName: string,
): PRReviewFinding => {
  const severity = config.confidence >= 85 ? "warning" : "nitpick";
  return {
    id: generateFindingId(),
    type: "style",
    severity,
    file: path,
    line: lineNumber,
    message: config.message,
    details: `Pattern: ${patternName}`,
    suggestion: config.suggestion,
    confidence: config.confidence,
    reviewer: "style",
  };
};
/**
 * Keep at most `maxPerPattern` findings per distinct message, preserving
 * order, so a single noisy pattern cannot dominate the report.
 */
const limitFindings = (
  findings: PRReviewFinding[],
  maxPerPattern: number,
): PRReviewFinding[] => {
  const seenCounts = new Map<string, number>();
  return findings.filter((finding) => {
    const seen = seenCounts.get(finding.message) ?? 0;
    if (seen >= maxPerPattern) {
      return false;
    }
    seenCounts.set(finding.message, seen + 1);
    return true;
  });
};
/**
 * List every added line of the diff together with its line number in the
 * new file, counted from each hunk's newStart.
 */
const getAllAddedLines = (
  diff: ParsedFileDiff,
): Array<{ content: string; lineNumber: number }> => {
  const result: Array<{ content: string; lineNumber: number }> = [];
  for (const hunk of diff.hunks) {
    hunk.additions.forEach((content, offset) =>
      result.push({ content, lineNumber: hunk.newStart + offset }),
    );
  }
  return result;
};
/**
 * Generate a unique style-finding ID: "style_" + timestamp + random
 * base36 suffix. Uses slice() instead of the deprecated substr().
 */
const generateFindingId = (): string => {
  return `style_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
};
/** Prompt used to steer the model during the style review pass. */
export const getPrompt = (): string => REVIEWER_PROMPTS.style;

View File

@@ -74,7 +74,37 @@ const MODE_PROMPT_BUILDERS: Record<
};
/**
* Get git context for prompt building
* Execute git command asynchronously
*/
const execGitCommand = (args: string[]): Promise<string> => {
return new Promise((resolve, reject) => {
const { spawn } = require("child_process");
const proc = spawn("git", args, { cwd: process.cwd() });
let stdout = "";
let stderr = "";
proc.stdout.on("data", (data: Buffer) => {
stdout += data.toString();
});
proc.stderr.on("data", (data: Buffer) => {
stderr += data.toString();
});
proc.on("close", (code: number) => {
if (code === 0) {
resolve(stdout.trim());
} else {
reject(new Error(stderr || `git exited with code ${code}`));
}
});
proc.on("error", reject);
});
};
/**
* Get git context for prompt building (async, non-blocking)
*/
export const getGitContext = async (): Promise<{
isGitRepo: boolean;
@@ -83,16 +113,15 @@ export const getGitContext = async (): Promise<{
recentCommits?: string[];
}> => {
try {
const { execSync } = await import("child_process");
const branch = execSync("git branch --show-current", {
encoding: "utf-8",
}).trim();
const status =
execSync("git status --short", { encoding: "utf-8" }).trim() || "(clean)";
const commits = execSync("git log --oneline -5", { encoding: "utf-8" })
.trim()
.split("\n")
.filter(Boolean);
// Run all git commands in parallel for faster execution
const [branch, status, commits] = await Promise.all([
execGitCommand(["branch", "--show-current"]),
execGitCommand(["status", "--short"]).then((s) => s || "(clean)"),
execGitCommand(["log", "--oneline", "-5"]).then((s) =>
s.split("\n").filter(Boolean),
),
]);
return { isGitRepo: true, branch, status, recentCommits: commits };
} catch {
return { isGitRepo: false };

View File

@@ -0,0 +1,318 @@
/**
* Session Compaction Service
*
* Integrates auto-compaction with the agent loop and hooks system.
* Follows OpenCode's two-tier approach: pruning (remove old tool output)
* and compaction (summarize for fresh context).
*/
import type { Message } from "@/types/providers";
import {
CHARS_PER_TOKEN,
TOKEN_OVERFLOW_THRESHOLD,
PRUNE_MINIMUM_TOKENS,
PRUNE_PROTECT_TOKENS,
PRUNE_RECENT_TURNS,
PRUNE_PROTECTED_TOOLS,
TOKEN_MESSAGES,
} from "@constants/token";
import { getModelContextSize, DEFAULT_CONTEXT_SIZE } from "@constants/copilot";
import {
compactConversation,
checkCompactionNeeded,
getModelCompactionConfig,
createCompactionSummary,
} from "@services/auto-compaction";
import { appStore } from "@tui-solid/context/app";
/**
 * Rough token estimate for a string using the CHARS_PER_TOKEN heuristic.
 * Falsy input counts as empty; the result is rounded and never negative.
 */
export const estimateTokens = (content: string): number => {
  const length = (content || "").length;
  return Math.max(0, Math.round(length / CHARS_PER_TOKEN));
};
/**
 * Sum the estimated token counts of all messages in a conversation.
 * Non-string content is measured via its JSON serialization.
 */
export const estimateMessagesTokens = (messages: Message[]): number => {
  let total = 0;
  for (const msg of messages) {
    const text =
      typeof msg.content === "string"
        ? msg.content
        : JSON.stringify(msg.content);
    total += estimateTokens(text);
  }
  return total;
};
/**
 * Whether the conversation is close enough to the model's input limit
 * that compaction should run (usage at or above the
 * TOKEN_OVERFLOW_THRESHOLD share of the input window).
 */
export const isContextOverflow = (
  messages: Message[],
  modelId?: string,
): boolean => {
  const contextSize = modelId
    ? getModelContextSize(modelId)
    : DEFAULT_CONTEXT_SIZE;
  const limit = contextSize.input * TOKEN_OVERFLOW_THRESHOLD;
  return estimateMessagesTokens(messages) >= limit;
};
/**
 * Prune old tool outputs from messages
 *
 * Strategy (following OpenCode):
 * 1. Walk backwards through messages
 * 2. Skip first N user turns (protect recent context)
 * 3. Mark tool outputs for pruning once we accumulate enough tokens
 * 4. Only prune if we can free minimum threshold
 *
 * Pruned messages keep their role/metadata but have their content replaced
 * with a short truncation marker. The input array is not mutated.
 *
 * @param messages - Full conversation history, oldest first.
 * @param options - Tuning knobs; defaults come from @constants/token.
 * @returns The (possibly new) message array plus how many tool outputs
 *   were pruned and the estimated tokens saved. When the savings would be
 *   below `minTokensToFree`, the original array is returned untouched.
 */
export const pruneToolOutputs = (
  messages: Message[],
  options: {
    minTokensToFree?: number;
    protectThreshold?: number;
    recentTurns?: number;
    protectedTools?: Set<string>;
  } = {},
): { messages: Message[]; prunedCount: number; tokensSaved: number } => {
  const {
    minTokensToFree = PRUNE_MINIMUM_TOKENS,
    protectThreshold = PRUNE_PROTECT_TOKENS,
    recentTurns = PRUNE_RECENT_TURNS,
    protectedTools = PRUNE_PROTECTED_TOOLS,
  } = options;
  // Find tool messages to potentially prune
  interface PruneCandidate {
    index: number;
    tokens: number;
  }
  const candidates: PruneCandidate[] = [];
  let userTurnCount = 0;
  let totalPrunableTokens = 0;
  // Walk backwards through messages (newest first) so the most recent
  // turns are naturally seen — and protected — before older ones.
  for (let i = messages.length - 1; i >= 0; i--) {
    const msg = messages[i];
    // Count user turns
    if (msg.role === "user") {
      userTurnCount++;
    }
    // Skip if in protected recent turns
    if (userTurnCount < recentTurns) {
      continue;
    }
    // Check for tool messages
    if (msg.role === "tool") {
      // Extract tool name from tool_call_id if possible.
      // NOTE(review): assumes tool_call_id is formatted "<toolname>-..."
      // — confirm against wherever tool_call_id is generated.
      const toolName = (msg as { tool_call_id?: string }).tool_call_id
        ?.split("-")[0] ?? "";
      // Skip protected tools
      if (protectedTools.has(toolName)) {
        continue;
      }
      const content =
        typeof msg.content === "string"
          ? msg.content
          : JSON.stringify(msg.content);
      const tokens = estimateTokens(content);
      totalPrunableTokens += tokens;
      // Only mark for pruning after we've accumulated enough — the most
      // recent `protectThreshold` tokens' worth of tool output survives.
      if (totalPrunableTokens > protectThreshold) {
        candidates.push({ index: i, tokens });
      }
    }
  }
  // Calculate total tokens to save
  const tokensSaved = candidates.reduce((sum, c) => sum + c.tokens, 0);
  // Only prune if we can free minimum threshold
  if (tokensSaved < minTokensToFree) {
    return { messages, prunedCount: 0, tokensSaved: 0 };
  }
  // Create pruned messages
  const prunedIndices = new Set(candidates.map((c) => c.index));
  const prunedMessages = messages.map((msg, index) => {
    if (prunedIndices.has(index)) {
      // Replace content with truncation marker
      return {
        ...msg,
        content: "[Output pruned to save context]",
      };
    }
    return msg;
  });
  return {
    messages: prunedMessages,
    prunedCount: candidates.length,
    tokensSaved,
  };
};
/**
 * Perform full session compaction
 *
 * 1. First try pruning tool outputs
 * 2. If still over threshold, do full compaction
 *
 * @param messages - Conversation history, oldest first.
 * @param modelId - Optional model ID used to pick compaction config.
 * @param options - Progress callbacks for UI updates at each phase.
 * @returns The resulting messages plus flags for what happened and the
 *   combined estimated tokens saved by both phases.
 */
export const performSessionCompaction = async (
  messages: Message[],
  modelId?: string,
  options?: {
    onPruneStart?: () => void;
    onPruneComplete?: (count: number, saved: number) => void;
    onCompactStart?: () => void;
    onCompactComplete?: (saved: number) => void;
  },
): Promise<{
  messages: Message[];
  compacted: boolean;
  pruned: boolean;
  tokensSaved: number;
}> => {
  const config = getModelCompactionConfig(modelId);
  // Phase 1: Try pruning first — cheap and lossless compared to summarizing.
  options?.onPruneStart?.();
  const pruneResult = pruneToolOutputs(messages);
  if (pruneResult.prunedCount > 0) {
    options?.onPruneComplete?.(pruneResult.prunedCount, pruneResult.tokensSaved);
    // Check if pruning was enough
    const afterPruneCheck = checkCompactionNeeded(pruneResult.messages, config);
    if (!afterPruneCheck.needsCompaction) {
      return {
        messages: pruneResult.messages,
        compacted: false,
        pruned: true,
        tokensSaved: pruneResult.tokensSaved,
      };
    }
  }
  // Phase 2: Full compaction needed
  options?.onCompactStart?.();
  // NOTE(review): compactConversation's result is used synchronously here;
  // confirm it is not async (its return is destructured without await).
  const compactResult = compactConversation(
    pruneResult.prunedCount > 0 ? pruneResult.messages : messages,
    config,
  );
  if (compactResult.result.compacted) {
    options?.onCompactComplete?.(compactResult.result.tokensSaved);
  }
  const totalSaved = pruneResult.tokensSaved + compactResult.result.tokensSaved;
  return {
    messages: compactResult.messages,
    compacted: compactResult.result.compacted,
    pruned: pruneResult.prunedCount > 0,
    tokensSaved: totalSaved,
  };
};
/**
 * Create a compaction check middleware for the agent loop
 *
 * The returned pair lets the agent loop ask whether compaction is due
 * (`shouldCompact`) and run it (`compact`). `compact` drives UI state via
 * appStore (compacting flag, thinking message, log entries) and always
 * clears that state in `finally`, even when compaction throws.
 *
 * @param modelId - Optional model ID used for context-size thresholds.
 */
export const createCompactionMiddleware = (
  modelId?: string,
): {
  shouldCompact: (messages: Message[]) => boolean;
  compact: (
    messages: Message[],
  ) => Promise<{ messages: Message[]; summary: string }>;
} => {
  return {
    shouldCompact: (messages: Message[]) => isContextOverflow(messages, modelId),
    compact: async (messages: Message[]) => {
      // Notify UI that compaction is starting
      appStore.setIsCompacting(true);
      try {
        const result = await performSessionCompaction(messages, modelId, {
          onPruneStart: () => {
            appStore.setThinkingMessage("Pruning old tool outputs...");
          },
          onPruneComplete: (count, saved) => {
            appStore.addLog({
              type: "system",
              content: `Pruned ${count} tool outputs (${saved.toLocaleString()} tokens)`,
            });
          },
          onCompactStart: () => {
            appStore.setThinkingMessage(TOKEN_MESSAGES.COMPACTION_STARTING);
          },
          onCompactComplete: (saved) => {
            appStore.addLog({
              type: "system",
              content: TOKEN_MESSAGES.COMPACTION_COMPLETE(saved),
            });
          },
        });
        // Build a one-line human-readable summary of what happened;
        // empty string when neither phase did anything.
        const parts: string[] = [];
        if (result.pruned) {
          parts.push("pruned old outputs");
        }
        if (result.compacted) {
          parts.push("compacted conversation");
        }
        const summary =
          parts.length > 0
            ? `Context management: ${parts.join(", ")} (${result.tokensSaved.toLocaleString()} tokens saved)`
            : "";
        return {
          messages: result.messages,
          summary,
        };
      } finally {
        // Always reset UI state, even on failure.
        appStore.setIsCompacting(false);
        appStore.setThinkingMessage(null);
      }
    },
  };
};
/**
 * Snapshot of current context usage for display.
 *
 * @returns Current and maximum token counts, the usage percentage
 *   (0 when the maximum is 0), and whether compaction is due now.
 */
export const getCompactionStatus = (
  messages: Message[],
  modelId?: string,
): {
  currentTokens: number;
  maxTokens: number;
  usagePercent: number;
  needsCompaction: boolean;
} => {
  const contextSize = modelId
    ? getModelContextSize(modelId)
    : DEFAULT_CONTEXT_SIZE;
  const maxTokens = contextSize.input;
  const currentTokens = estimateMessagesTokens(messages);
  const usagePercent = maxTokens > 0 ? (currentTokens / maxTokens) * 100 : 0;
  return {
    currentTokens,
    maxTokens,
    usagePercent,
    needsCompaction: isContextOverflow(messages, modelId),
  };
};

View File

@@ -0,0 +1,435 @@
/**
* Skill Loader Service
*
* Parses SKILL.md files with frontmatter and body content.
* Supports progressive disclosure with 3 loading levels.
*/
import fs from "fs/promises";
import { join } from "path";
import {
SKILL_FILE,
SKILL_DIRS,
SKILL_DEFAULTS,
SKILL_ERRORS,
SKILL_REQUIRED_FIELDS,
SKILL_LOADING,
} from "@constants/skills";
import type {
SkillDefinition,
SkillMetadata,
SkillFrontmatter,
ParsedSkillFile,
SkillExample,
SkillLoadLevel,
} from "@/types/skills";
// ============================================================================
// Frontmatter Parsing
// ============================================================================
/**
 * Split SKILL.md content into its frontmatter block and markdown body.
 *
 * Frontmatter must open on the very first line with the delimiter and be
 * closed by a later delimiter line; otherwise the entire input is treated
 * as body and the frontmatter comes back empty. The body is trimmed.
 */
const parseFrontmatter = (content: string): { frontmatter: string; body: string } => {
  const delimiter = SKILL_FILE.FRONTMATTER_DELIMITER;
  const lines = content.split("\n");
  if (lines[0]?.trim() !== delimiter) {
    return { frontmatter: "", body: content };
  }
  // Find the closing delimiter, skipping the opening line.
  const closeIndex = lines.findIndex(
    (line, index) => index > 0 && line?.trim() === delimiter,
  );
  if (closeIndex === -1) {
    // Unterminated frontmatter: treat everything as body.
    return { frontmatter: "", body: content };
  }
  return {
    frontmatter: lines.slice(1, closeIndex).join("\n"),
    body: lines.slice(closeIndex + 1).join("\n").trim(),
  };
};
/**
 * Parse simple YAML-like frontmatter to object
 * Supports: strings, arrays (- item), booleans
 *
 * This is a deliberately minimal, line-oriented parser: flat key/value
 * pairs and one level of "- item" lists only (no nesting, no multi-line
 * scalars). State carried across lines via currentKey/currentArray makes
 * statement order below significant.
 */
const parseYamlLike = (yaml: string): Record<string, unknown> => {
  const result: Record<string, unknown> = {};
  const lines = yaml.split("\n");
  // Most recently seen key; list items attach to this key.
  let currentKey: string | null = null;
  // Accumulator for "- item" lines under currentKey; null while the
  // current key holds a scalar (or nothing yet).
  let currentArray: string[] | null = null;
  for (const line of lines) {
    const trimmed = line.trim();
    // Skip blank lines and full-line comments.
    if (!trimmed || trimmed.startsWith("#")) continue;
    // Array item
    if (trimmed.startsWith("- ") && currentKey) {
      const value = trimmed.slice(2).trim();
      if (!currentArray) {
        currentArray = [];
      }
      // Remove quotes if present
      const unquoted = value.replace(/^["']|["']$/g, "");
      currentArray.push(unquoted);
      // Re-assigned on every item so the result stays complete even if
      // the document ends mid-list.
      result[currentKey] = currentArray;
      continue;
    }
    // Key-value pair
    const colonIndex = trimmed.indexOf(":");
    if (colonIndex > 0) {
      // Save previous array if exists
      if (currentArray && currentKey) {
        result[currentKey] = currentArray;
      }
      currentArray = null;
      currentKey = trimmed.slice(0, colonIndex).trim();
      const value = trimmed.slice(colonIndex + 1).trim();
      if (!value) {
        // Empty value, might be followed by array
        continue;
      }
      // Parse value type
      const parsed = parseValue(value);
      result[currentKey] = parsed;
    }
  }
  return result;
};
/**
 * Interpret a scalar frontmatter value as a boolean, number, or string.
 * Surrounding single or double quotes are stripped from strings.
 */
const parseValue = (value: string): string | boolean | number => {
  switch (value) {
    case "true":
      return true;
    case "false":
      return false;
  }
  // Numeric if it converts cleanly; the empty string converts to 0, so
  // it is explicitly excluded.
  const asNumber = Number(value);
  if (value !== "" && !isNaN(asNumber)) return asNumber;
  return value.replace(/^["']|["']$/g, "");
};
/**
 * Validate a parsed frontmatter object and coerce it into a typed
 * SkillFrontmatter.
 *
 * @throws Error when a required field is missing/empty, or when
 *   `triggers` is not an array.
 */
const validateFrontmatter = (
  data: Record<string, unknown>,
  filePath: string,
): SkillFrontmatter => {
  for (const field of SKILL_REQUIRED_FIELDS) {
    const present =
      field in data && data[field] !== undefined && data[field] !== "";
    if (!present) {
      throw new Error(SKILL_ERRORS.MISSING_REQUIRED_FIELD(field, filePath));
    }
  }
  // Validate triggers is an array
  if (!Array.isArray(data.triggers)) {
    throw new Error(SKILL_ERRORS.MISSING_REQUIRED_FIELD("triggers (array)", filePath));
  }
  // Optional list fields: keep only when actually arrays.
  const asStringArray = (v: unknown): string[] | undefined =>
    Array.isArray(v) ? (v as string[]) : undefined;
  return {
    id: String(data.id),
    name: String(data.name),
    description: String(data.description),
    version: data.version ? String(data.version) : undefined,
    triggers: data.triggers as string[],
    triggerType: data.triggerType as SkillFrontmatter["triggerType"],
    autoTrigger: typeof data.autoTrigger === "boolean" ? data.autoTrigger : undefined,
    requiredTools: asStringArray(data.requiredTools),
    tags: asStringArray(data.tags),
  };
};
// ============================================================================
// Example Parsing
// ============================================================================
/**
 * Extract worked examples from the "## Examples" section of a skill body.
 *
 * Each "### " sub-block containing both an "Input:" and an "Output:"
 * label becomes one SkillExample; the sub-block's first line (usually a
 * heading) is used as the description when present.
 */
const parseExamples = (body: string): SkillExample[] => {
  const exampleSection = body.match(/## Examples([\s\S]*?)(?=##[^#]|$)/i);
  if (!exampleSection) return [];
  const blocks = exampleSection[1].split(/### /);
  const examples: SkillExample[] = [];
  for (const block of blocks) {
    if (!block.trim()) continue;
    const inputMatch = block.match(/Input:\s*([\s\S]*?)(?=Output:|$)/i);
    const outputMatch = block.match(/Output:\s*([\s\S]*?)(?=###|$)/i);
    // Both labels are required; blocks missing either are skipped.
    if (!inputMatch || !outputMatch) continue;
    const descMatch = block.match(/^([^\n]+)/);
    examples.push({
      input: inputMatch[1].trim(),
      output: outputMatch[1].trim(),
      description: descMatch ? descMatch[1].trim() : undefined,
    });
  }
  return examples;
};
// ============================================================================
// File Loading
// ============================================================================
/**
* Load and parse a SKILL.md file
*/
export const loadSkillFile = async (filePath: string): Promise<ParsedSkillFile> => {
try {
const stat = await fs.stat(filePath);
if (stat.size > SKILL_LOADING.MAX_FILE_SIZE_BYTES) {
throw new Error(`Skill file too large: ${filePath}`);
}
const content = await fs.readFile(filePath, SKILL_FILE.ENCODING);
const { frontmatter, body } = parseFrontmatter(content);
if (!frontmatter) {
throw new Error(SKILL_ERRORS.INVALID_FRONTMATTER(filePath));
}
const data = parseYamlLike(frontmatter);
const validatedFrontmatter = validateFrontmatter(data, filePath);
const examples = parseExamples(body);
return {
frontmatter: validatedFrontmatter,
body,
examples: examples.length > 0 ? examples : undefined,
filePath,
};
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
throw new Error(SKILL_ERRORS.LOAD_FAILED(filePath, message));
}
};
/**
 * Project a parsed skill file down to its Level 1 metadata, applying
 * defaults for any optional frontmatter fields that were omitted.
 */
export const toSkillMetadata = (parsed: ParsedSkillFile): SkillMetadata => {
  const fm = parsed.frontmatter;
  return {
    id: fm.id,
    name: fm.name,
    description: fm.description,
    version: fm.version ?? SKILL_DEFAULTS.VERSION,
    triggers: fm.triggers,
    triggerType: fm.triggerType ?? SKILL_DEFAULTS.TRIGGER_TYPE,
    autoTrigger: fm.autoTrigger ?? SKILL_DEFAULTS.AUTO_TRIGGER,
    requiredTools: fm.requiredTools ?? SKILL_DEFAULTS.REQUIRED_TOOLS,
    tags: fm.tags,
  };
};
/**
 * Build the full Level 3 SkillDefinition (metadata plus prompts and
 * examples) from a parsed skill file, stamping the load time.
 */
export const toSkillDefinition = (parsed: ParsedSkillFile): SkillDefinition => {
  // Prompt content comes from the markdown body's sections.
  const { systemPrompt, instructions } = parseSkillBody(parsed.body);
  return {
    ...toSkillMetadata(parsed),
    systemPrompt,
    instructions,
    examples: parsed.examples,
    loadedAt: Date.now(),
  };
};
/**
 * Split a skill body into its system prompt and instructions.
 *
 * When the "## System Prompt" section is absent the system prompt is
 * empty; when "## Instructions" is absent the whole (trimmed) body is
 * treated as the instructions.
 */
const parseSkillBody = (body: string): { systemPrompt: string; instructions: string } => {
  const systemPromptMatch = body.match(
    /## System Prompt([\s\S]*?)(?=## Instructions|## Examples|$)/i,
  );
  const instructionsMatch = body.match(
    /## Instructions([\s\S]*?)(?=## Examples|## System Prompt|$)/i,
  );
  return {
    systemPrompt: systemPromptMatch ? systemPromptMatch[1].trim() : "",
    instructions: instructionsMatch ? instructionsMatch[1].trim() : body.trim(),
  };
};
// ============================================================================
// Directory Scanning
// ============================================================================
/**
* Find all SKILL.md files in a directory
*/
export const findSkillFiles = async (dir: string): Promise<string[]> => {
const skillFiles: string[] = [];
try {
const entries = await fs.readdir(dir, { withFileTypes: true });
for (const entry of entries) {
const fullPath = join(dir, entry.name);
if (entry.isDirectory()) {
// Check for SKILL.md in subdirectory
const skillPath = join(fullPath, SKILL_FILE.NAME);
try {
await fs.access(skillPath);
skillFiles.push(skillPath);
} catch {
// No SKILL.md in this directory
}
} else if (entry.name === SKILL_FILE.NAME) {
skillFiles.push(fullPath);
}
}
} catch {
// Directory doesn't exist or isn't accessible
}
return skillFiles;
};
/**
* Load all skills from a directory
*/
export const loadSkillsFromDirectory = async (
dir: string,
level: SkillLoadLevel = "metadata",
): Promise<SkillDefinition[]> => {
const skillFiles = await findSkillFiles(dir);
const skills: SkillDefinition[] = [];
for (const filePath of skillFiles) {
try {
const parsed = await loadSkillFile(filePath);
if (level === "metadata") {
// Only load metadata, but cast to SkillDefinition for uniform handling
const metadata = toSkillMetadata(parsed);
skills.push({
...metadata,
systemPrompt: "",
instructions: "",
});
} else {
skills.push(toSkillDefinition(parsed));
}
} catch (error) {
// Log error but continue loading other skills
console.error(
`Failed to load skill: ${filePath}`,
error instanceof Error ? error.message : error,
);
}
}
return skills;
};
/**
 * Load skills from every known location: builtin, user, and — when it
 * exists — the current project's skills directory.
 *
 * Skills sharing an id are deduplicated with later directories winning,
 * so project skills override user skills, which override builtins.
 */
export const loadAllSkills = async (
  level: SkillLoadLevel = "metadata",
): Promise<SkillDefinition[]> => {
  const dirs = [SKILL_DIRS.BUILTIN, SKILL_DIRS.USER];
  const projectSkillsDir = join(process.cwd(), SKILL_DIRS.PROJECT);
  try {
    await fs.access(projectSkillsDir);
    dirs.push(projectSkillsDir);
  } catch {
    // No project skills directory
  }
  // Map insertion order implements the override precedence.
  const byId = new Map<string, SkillDefinition>();
  for (const dir of dirs) {
    for (const skill of await loadSkillsFromDirectory(dir, level)) {
      byId.set(skill.id, skill);
    }
  }
  return [...byId.values()];
};
/**
* Load a specific skill by ID
*/
export const loadSkillById = async (
id: string,
level: SkillLoadLevel = "full",
): Promise<SkillDefinition | null> => {
const dirs = [SKILL_DIRS.BUILTIN, SKILL_DIRS.USER];
const projectSkillsDir = join(process.cwd(), SKILL_DIRS.PROJECT);
try {
await fs.access(projectSkillsDir);
dirs.push(projectSkillsDir);
} catch {
// No project skills directory
}
// Search in reverse order (project > user > builtin)
for (const dir of dirs.reverse()) {
const skillPath = join(dir, id, SKILL_FILE.NAME);
try {
await fs.access(skillPath);
const parsed = await loadSkillFile(skillPath);
if (parsed.frontmatter.id === id) {
return level === "metadata"
? { ...toSkillMetadata(parsed), systemPrompt: "", instructions: "" }
: toSkillDefinition(parsed);
}
} catch {
// Skill not found in this directory
}
}
return null;
};

View File

@@ -0,0 +1,407 @@
/**
* Skill Registry Service
*
* Manages skill registration, matching, and invocation.
* Uses progressive disclosure to load skills on demand.
*/
import {
SKILL_MATCHING,
SKILL_LOADING,
SKILL_ERRORS,
} from "@constants/skills";
import {
loadAllSkills,
loadSkillById,
} from "@services/skill-loader";
import type {
SkillDefinition,
SkillMatch,
SkillContext,
SkillExecutionResult,
SkillRegistryState,
} from "@/types/skills";
// ============================================================================
// State Management
// ============================================================================
// Module-level singleton: the in-memory skill registry shared by every
// exported function in this file. Mutated in place by the registration
// and loading functions below; snapshot via getRegistryState().
let registryState: SkillRegistryState = {
  skills: new Map(),
  lastLoadedAt: null,
  loadErrors: [],
};
/**
 * Snapshot the current registry state.
 *
 * Returns shallow copies of the skills map and error list so callers
 * cannot mutate the registry's internals through the result.
 */
export const getRegistryState = (): SkillRegistryState => {
  const { skills, lastLoadedAt, loadErrors } = registryState;
  return {
    skills: new Map(skills),
    lastLoadedAt,
    loadErrors: loadErrors.slice(),
  };
};
/**
 * Whether the registry cache has expired or was never populated.
 */
const isCacheStale = (): boolean =>
  !registryState.lastLoadedAt ||
  Date.now() - registryState.lastLoadedAt > SKILL_LOADING.CACHE_TTL_MS;
// ============================================================================
// Skill Registration
// ============================================================================
/**
 * (Re)populate the registry with metadata for every available skill.
 *
 * Clears previous entries and errors before loading; a load failure is
 * recorded in loadErrors rather than thrown.
 */
export const initializeRegistry = async (): Promise<void> => {
  try {
    const skills = await loadAllSkills("metadata");
    registryState.skills.clear();
    registryState.loadErrors = [];
    for (const skill of skills) {
      registryState.skills.set(skill.id, skill);
    }
    registryState.lastLoadedAt = Date.now();
  } catch (error) {
    registryState.loadErrors.push(
      error instanceof Error ? error.message : String(error),
    );
  }
};
/**
 * Add (or replace) a skill in the registry under its id.
 */
export const registerSkill = (skill: SkillDefinition): void => {
  const { skills } = registryState;
  skills.set(skill.id, skill);
};
/**
 * Remove a skill from the registry.
 *
 * @returns true when a skill with that id was present and removed.
 */
export const unregisterSkill = (skillId: string): boolean =>
  registryState.skills.delete(skillId);
/**
 * Look up a registered skill by id.
 */
export const getSkill = (skillId: string): SkillDefinition | undefined =>
  registryState.skills.get(skillId);
/**
 * List every registered skill.
 */
export const getAllSkills = (): SkillDefinition[] => [
  ...registryState.skills.values(),
];
/**
 * Reload the registry when its cache TTL has elapsed; no-op otherwise.
 */
export const refreshIfNeeded = async (): Promise<void> => {
  if (!isCacheStale()) return;
  await initializeRegistry();
};
// ============================================================================
// Skill Matching
// ============================================================================
/**
 * Score how similar two strings are, in [0, 1].
 *
 * NOTE: this is a cheap heuristic, not an edit distance — exact
 * (case-insensitive) match scores 1, substring containment scores 0.8,
 * and otherwise the score is the fraction of overlapping
 * whitespace-separated words relative to the larger word set. The
 * previous comment claimed Levenshtein distance, which this never
 * computed.
 */
const calculateSimilarity = (a: string, b: string): number => {
  const aLower = a.toLowerCase();
  const bLower = b.toLowerCase();
  if (aLower === bLower) return 1;
  if (aLower.includes(bLower) || bLower.includes(aLower)) {
    return 0.8;
  }
  // Word-overlap ratio: shared words / size of the larger word set.
  const aWords = new Set(aLower.split(/\s+/));
  const bWords = new Set(bLower.split(/\s+/));
  const intersection = [...aWords].filter((word) => bWords.has(word));
  if (intersection.length === 0) return 0;
  return intersection.length / Math.max(aWords.size, bWords.size);
};
/**
 * Test user input against a single trigger pattern.
 *
 * Confidence tiers: exact match 1.0, slash-command prefix match 0.95,
 * plain prefix match 0.9, otherwise fuzzy similarity when it reaches
 * the configured threshold.
 */
const matchTrigger = (
  input: string,
  trigger: string,
): { matches: boolean; confidence: number } => {
  const inputLower = input.toLowerCase().trim();
  const triggerLower = trigger.toLowerCase().trim();
  if (inputLower === triggerLower) {
    return { matches: true, confidence: 1.0 };
  }
  const isCommandTrigger = trigger.startsWith(SKILL_MATCHING.COMMAND_PREFIX);
  if (isCommandTrigger && inputLower.startsWith(triggerLower)) {
    return { matches: true, confidence: 0.95 };
  }
  if (inputLower.startsWith(triggerLower)) {
    return { matches: true, confidence: 0.9 };
  }
  const similarity = calculateSimilarity(inputLower, triggerLower);
  return similarity >= SKILL_MATCHING.FUZZY_THRESHOLD
    ? { matches: true, confidence: similarity }
    : { matches: false, confidence: 0 };
};
/**
 * Rank all registered skills against user input.
 *
 * Refreshes the registry if stale, evaluates every trigger of every
 * skill, keeps each skill's best-scoring trigger, and returns matches
 * at or above MIN_CONFIDENCE sorted by descending confidence.
 */
export const findMatchingSkills = async (input: string): Promise<SkillMatch[]> => {
  await refreshIfNeeded();
  const inputLower = input.toLowerCase().trim();
  const matches: SkillMatch[] = [];
  for (const skill of registryState.skills.values()) {
    // Track only this skill's highest-confidence trigger.
    let best: { trigger: string; confidence: number } | null = null;
    for (const trigger of skill.triggers) {
      const { matches: hit, confidence } = matchTrigger(inputLower, trigger);
      if (hit && (!best || confidence > best.confidence)) {
        best = { trigger, confidence };
      }
    }
    if (best && best.confidence >= SKILL_MATCHING.MIN_CONFIDENCE) {
      matches.push({
        skill,
        confidence: best.confidence,
        matchedTrigger: best.trigger,
        matchType: skill.triggerType,
      });
    }
  }
  return matches.sort((a, b) => b.confidence - a.confidence);
};
/**
 * Highest-confidence skill match for the input, or null when none.
 */
export const findBestMatch = async (input: string): Promise<SkillMatch | null> => {
  const [top] = await findMatchingSkills(input);
  return top ?? null;
};
/**
 * Whether the (trimmed) input looks like a slash command.
 */
export const isCommandInput = (input: string): boolean =>
  input.trim().startsWith(SKILL_MATCHING.COMMAND_PREFIX);
/**
 * Pull the command name out of a slash-command input.
 *
 * @returns The name without its leading prefix and without any
 *   arguments, or null when the input is not a command.
 */
export const extractCommandName = (input: string): string | null => {
  if (!isCommandInput(input)) return null;
  const trimmed = input.trim();
  const spaceIndex = trimmed.indexOf(" ");
  // Strip the leading "/" and anything after the first space.
  return spaceIndex === -1 ? trimmed.slice(1) : trimmed.slice(1, spaceIndex);
};
// ============================================================================
// Skill Execution
// ============================================================================
/**
 * Ensure a skill's full definition (prompt fields included) is available.
 *
 * Returns the cached entry when it already carries prompt content;
 * otherwise loads the full definition from disk and caches it in the
 * registry.
 *
 * @returns The full skill, or null when it cannot be found.
 */
export const loadSkillForExecution = async (
  skillId: string,
): Promise<SkillDefinition | null> => {
  const cached = registryState.skills.get(skillId);
  if (cached?.systemPrompt && cached.instructions) {
    return cached;
  }
  const fullSkill = await loadSkillById(skillId, "full");
  if (!fullSkill) return null;
  registryState.skills.set(skillId, fullSkill);
  return fullSkill;
};
/**
 * Assemble the prompt used when a skill executes.
 *
 * Sections, each separated by a blank line: system prompt (if any),
 * instructions, examples, then runtime context (working directory,
 * optional git branch, and the raw user input).
 */
export const buildSkillPrompt = (
  skill: SkillDefinition,
  context: SkillContext,
): string => {
  const sections: string[] = [];
  if (skill.systemPrompt) {
    sections.push(skill.systemPrompt);
  }
  if (skill.instructions) {
    sections.push("## Instructions\n" + skill.instructions);
  }
  const examples = skill.examples ?? [];
  if (examples.length > 0) {
    sections.push("## Examples");
    for (const example of examples) {
      if (example.description) {
        sections.push(`### ${example.description}`);
      }
      sections.push(`Input: ${example.input}`);
      sections.push(`Output: ${example.output}`);
    }
  }
  // Runtime context always comes last.
  sections.push("## Context");
  sections.push(`Working directory: ${context.workingDir}`);
  if (context.gitBranch) {
    sections.push(`Git branch: ${context.gitBranch}`);
  }
  sections.push(`User input: ${context.userInput}`);
  return sections.join("\n\n");
};
/**
 * Execute a skill: load its full definition and build the model prompt.
 *
 * Never throws — failures are reported through the result's error field.
 */
export const executeSkill = async (
  skillId: string,
  context: SkillContext,
): Promise<SkillExecutionResult> => {
  // Shared shape for every failure outcome.
  const failure = (error: string): SkillExecutionResult => ({
    success: false,
    skillId,
    prompt: "",
    error,
  });
  try {
    const skill = await loadSkillForExecution(skillId);
    if (!skill) {
      return failure(SKILL_ERRORS.NOT_FOUND(skillId));
    }
    return { success: true, skillId, prompt: buildSkillPrompt(skill, context) };
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    return failure(SKILL_ERRORS.EXECUTION_FAILED(skillId, message));
  }
};
/**
 * Match user input against registered skills and execute the best hit.
 *
 * @returns The execution result, or null when nothing matched.
 */
export const executeFromInput = async (
  input: string,
  context: Omit<SkillContext, "userInput">,
): Promise<SkillExecutionResult | null> => {
  const match = await findBestMatch(input);
  return match
    ? executeSkill(match.skill.id, { ...context, userInput: input })
    : null;
};
// ============================================================================
// Utility Functions
// ============================================================================
/**
 * Skills flagged to fire automatically (without an explicit command).
 */
export const getAutoTriggerSkills = (): SkillDefinition[] =>
  [...registryState.skills.values()].filter((skill) => skill.autoTrigger);
/**
 * Skills whose tag list contains the given tag.
 */
export const getSkillsByTag = (tag: string): SkillDefinition[] =>
  [...registryState.skills.values()].filter((skill) =>
    skill.tags?.includes(tag),
  );
/**
 * Command-style triggers (those starting with the command prefix) that
 * begin with the given partial input, matched case-insensitively and
 * returned sorted.
 */
export const getCommandCompletions = (partial: string): string[] => {
  const partialLower = partial.toLowerCase();
  const completions: string[] = [];
  for (const skill of registryState.skills.values()) {
    for (const trigger of skill.triggers) {
      const isCommand = trigger.startsWith(SKILL_MATCHING.COMMAND_PREFIX);
      if (isCommand && trigger.toLowerCase().startsWith(partialLower)) {
        completions.push(trigger);
      }
    }
  }
  return completions.sort();
};

107
src/skills/commit/SKILL.md Normal file
View File

@@ -0,0 +1,107 @@
---
id: commit
name: Commit Changes
description: Create a well-crafted git commit message and commit staged changes
version: 1.0.0
triggers:
- /commit
- commit changes
- commit this
- git commit
- make a commit
triggerType: command
autoTrigger: false
requiredTools:
- bash
tags:
- git
- workflow
---
## System Prompt
You are a commit message expert. You analyze staged changes and craft clear, conventional commit messages that explain the "why" behind changes.
## Instructions
Follow this process to create a commit:
### Step 1: Analyze Changes
Run these commands to understand what's being committed:
1. `git status` - See staged and unstaged files
2. `git diff --cached` - View staged changes in detail
3. `git log --oneline -5` - Check recent commit style
### Step 2: Determine Commit Type
Based on the changes, identify the commit type:
- **feat**: New feature
- **fix**: Bug fix
- **docs**: Documentation only
- **style**: Formatting, no code change
- **refactor**: Code change that neither fixes a bug nor adds a feature
- **perf**: Performance improvement
- **test**: Adding/updating tests
- **chore**: Maintenance tasks
### Step 3: Write Commit Message
Format: `<type>(<scope>): <subject>`
Rules:
- Subject line max 50 characters
- Use imperative mood ("Add feature" not "Added feature")
- No period at the end
- Body explains "why" not "what" (the diff shows what)
### Step 4: Create Commit
Use bash to run:
```bash
git commit -m "<type>(<scope>): <subject>
<body if needed>"
```
### Important Notes
- Never use `--no-verify` unless explicitly requested
- Never commit sensitive files (.env, credentials, secrets)
- If no changes are staged, inform the user and offer to stage them
- Ask for confirmation before committing if unsure about scope
## Examples
### Fix a bug
Input: /commit
Output:
```
Analyzing staged changes...
Changes detected:
- Modified: src/utils/date.ts (fixed timezone handling)
Suggested commit:
fix(utils): handle UTC offset in date formatting
The date formatter was ignoring timezone offsets, causing
incorrect display for users in non-UTC timezones.
Proceed with commit? [Y/n]
```
### Add new feature
Input: commit changes
Output:
```
Analyzing staged changes...
Changes detected:
- Added: src/features/export/index.ts
- Added: src/features/export/csv.ts
- Modified: src/types/index.ts
Suggested commit:
feat(export): add CSV export functionality
Users can now export their data as CSV files.
Supports custom delimiters and header configuration.
Proceed with commit? [Y/n]
```

217
src/skills/explain/SKILL.md Normal file
View File

@@ -0,0 +1,217 @@
---
id: explain
name: Explain Code
description: Provide clear, detailed explanations of code, architecture, or concepts
version: 1.0.0
triggers:
- /explain
- explain this
- explain code
- what does this do
- how does this work
triggerType: command
autoTrigger: false
requiredTools:
- read
- grep
- glob
tags:
- learning
- documentation
---
## System Prompt
You are a patient, thorough technical educator. You explain code and concepts at the appropriate level for the user, using analogies, examples, and progressive disclosure. You read the actual code before explaining and reference specific files and line numbers.
## Instructions
Follow this explanation process:
### Step 1: Identify What to Explain
1. If a file path is given, read that file
2. If a concept is mentioned, search for related code
3. If unclear, ask the user to clarify
### Step 2: Read and Understand
1. Read the relevant file(s) completely
2. Identify the key components and their relationships
3. Note any patterns, dependencies, or unusual constructs
### Step 3: Structure the Explanation
Use this format:
```
## Overview
[1-2 sentence summary of what this code does]
## Key Concepts
[List the main concepts someone needs to understand]
## How It Works
[Step-by-step explanation of the code flow]
## Code Walkthrough
[Annotated code snippets with explanations]
## Connections
[How this relates to other parts of the codebase]
## Summary
[Recap of the key points]
```
### Explanation Guidelines
1. **Start Simple**: Begin with a high-level overview before diving into details
2. **Use Analogies**: Relate complex concepts to familiar things
3. **Show, Don't Just Tell**: Include relevant code snippets
4. **Reference Locations**: Always cite `file:line` for code references
5. **Progressive Disclosure**: Start with "what" then go to "how" then "why"
6. **Check Understanding**: End with a summary that reinforces key points
### Depth Levels
Adjust explanation depth based on user's apparent level:
- **Beginner**: More analogies, simpler terms, more context
- **Intermediate**: Balance of concepts and implementation details
- **Advanced**: Focus on nuances, edge cases, design decisions
## Examples
### Explain a Function
Input: explain this function in src/utils/debounce.ts
Output:
```
## Overview
The `debounce` function delays executing a function until a certain amount of time has passed since it was last called. Think of it like an elevator: it waits for people to stop entering before closing the doors.
## Key Concepts
- **Debouncing**: Grouping multiple rapid calls into one
- **Closures**: The returned function "remembers" the timer
- **setTimeout/clearTimeout**: JavaScript's timing mechanisms
## How It Works
1. User calls the debounced function
2. Previous timer (if any) is cancelled
3. New timer is set for the delay period
4. If no new calls come in, the original function executes
5. If a new call comes in, repeat from step 2
## Code Walkthrough
\`\`\`typescript
// src/utils/debounce.ts:5-20
export function debounce<T extends (...args: any[]) => any>(
fn: T, // The function to debounce
delay: number // How long to wait (ms)
): (...args: Parameters<T>) => void {
let timeoutId: ReturnType<typeof setTimeout> | null = null;
// Return a new function that wraps the original
return (...args: Parameters<T>) => {
// Cancel any pending execution
if (timeoutId) {
clearTimeout(timeoutId);
}
// Schedule new execution
timeoutId = setTimeout(() => {
fn(...args); // Call original with latest args
timeoutId = null; // Clean up
}, delay);
};
}
\`\`\`
## Connections
This utility is used by:
- `src/components/SearchInput.tsx:23` - Debounce search API calls
- `src/hooks/useWindowResize.ts:8` - Debounce resize handlers
## Summary
`debounce` prevents a function from being called too frequently by waiting until calls stop coming in. It's essential for performance when handling rapid events like typing or resizing.
```
### Explain Architecture
Input: how does authentication work in this project?
Output:
```
## Overview
Authentication uses JWT (JSON Web Tokens) with a refresh token flow. Users log in once and receive two tokens: a short-lived access token for API calls and a long-lived refresh token for getting new access tokens.
## Key Concepts
- **JWT**: Self-contained tokens with encoded user data
- **Access Token**: Short-lived (15 min), used for API authorization
- **Refresh Token**: Long-lived (7 days), used to get new access tokens
- **HTTP-only Cookies**: Secure storage immune to XSS
## How It Works
1. **Login** (`src/auth/login.ts:15`)
- User submits credentials
- Server validates against database
- Server generates access + refresh tokens
- Tokens stored in HTTP-only cookies
2. **API Request** (`src/middleware/auth.ts:8`)
- Client sends request with cookies
- Middleware extracts access token
- JWT is verified and decoded
- User data attached to request
3. **Token Refresh** (`src/auth/refresh.ts:22`)
- Access token expires (15 min)
- Client automatically calls /refresh
- Server validates refresh token
- New access token issued
4. **Logout** (`src/auth/logout.ts:10`)
- Refresh token invalidated in database
- Cookies cleared
## Code Walkthrough
\`\`\`typescript
// src/middleware/auth.ts:8-25
export const requireAuth = async (req, res, next) => {
const token = req.cookies.accessToken;
if (!token) {
return res.status(401).json({ error: 'Not authenticated' });
}
try {
const decoded = jwt.verify(token, process.env.JWT_SECRET);
req.user = decoded; // Attach user to request
next();
} catch (err) {
if (err.name === 'TokenExpiredError') {
return res.status(401).json({ error: 'Token expired' });
}
return res.status(401).json({ error: 'Invalid token' });
}
};
\`\`\`
## Connections
- **User Model**: `src/models/User.ts` - Password hashing, user data
- **Protected Routes**: `src/routes/api/*.ts` - Use `requireAuth` middleware
- **Frontend**: `src/hooks/useAuth.ts` - Client-side auth state
## Summary
Authentication is handled through JWTs stored in HTTP-only cookies. The access token is short-lived for security, while the refresh token enables seamless re-authentication. The middleware pattern keeps protected routes clean and centralized.
```

View File

@@ -0,0 +1,254 @@
---
id: feature-dev
name: Feature Development
description: Guided 7-phase workflow for implementing new features with checkpoints
version: 1.0.0
triggers:
- /feature
- /feature-dev
- implement feature
- new feature
- build feature
- develop feature
triggerType: command
autoTrigger: false
requiredTools:
- read
- write
- edit
- bash
- glob
- grep
tags:
- workflow
- feature
- development
---
## System Prompt
You are CodeTyper in Feature Development mode - a structured approach to implementing new features. You guide the user through a 7-phase workflow with checkpoints for approval at critical stages.
## Instructions
### The 7 Phases
1. **UNDERSTAND** - Clarify requirements before coding
2. **EXPLORE** - Search codebase for relevant patterns
3. **PLAN** - Design the implementation approach
4. **IMPLEMENT** - Write the code changes
5. **VERIFY** - Run tests and validate
6. **REVIEW** - Self-review for quality
7. **FINALIZE** - Commit and complete
### Phase 1: UNDERSTAND
Goal: Fully understand what needs to be built.
Tasks:
- Analyze the feature request
- Identify unclear requirements
- Ask clarifying questions
- Document requirements
Output:
- List of requirements
- Assumptions made
- Questions for user (if any)
**Checkpoint:** Confirm requirements with user
### Phase 2: EXPLORE
Goal: Understand the existing codebase.
Tasks:
- Search for related code patterns
- Find files that need modification
- Identify conventions to follow
- Find similar implementations
Use parallel searches for efficiency.
Output:
- Relevant files list
- Patterns to follow
- Dependencies identified
### Phase 3: PLAN
Goal: Create a detailed implementation plan.
Tasks:
- Design solution architecture
- List files to create/modify/delete
- Define implementation order
- Identify risks
- Plan test strategy
Output:
- Step-by-step plan
- File change list
- Risk assessment
- Test plan
**Checkpoint:** Get plan approval before implementing
### Phase 4: IMPLEMENT
Goal: Write the code according to plan.
Tasks:
- Follow plan step by step
- Write clean, documented code
- Follow existing conventions
- Track all changes
Guidelines:
- One step at a time
- Minimal focused changes
- Add comments for complex logic
### Phase 5: VERIFY
Goal: Ensure implementation works correctly.
Tasks:
- Run existing tests
- Add new tests for feature
- Fix any failures
- Check for regressions
**Checkpoint:** Review test results
### Phase 6: REVIEW
Goal: Quality review of implementation.
Review for:
- Code clarity
- Error handling
- Edge cases
- Performance
- Security
- Documentation
**Checkpoint:** Address critical findings
### Phase 7: FINALIZE
Goal: Complete the feature.
Tasks:
- Create commit with good message
- Update documentation
- Clean up temporary files
- Summarize changes
**Checkpoint:** Approve final commit
## Examples
### Simple Feature
Input: /feature add a dark mode toggle to settings
Output:
```
# Feature Development: Dark Mode Toggle
## Phase 1: UNDERSTAND
I'll help you implement a dark mode toggle. Let me understand the requirements:
**Requirements identified:**
1. Add toggle switch in settings
2. Persist user preference
3. Apply dark theme when enabled
**Questions:**
- Should it respect system preference by default?
- Where exactly in settings should it appear?
[Awaiting user input...]
```
### Complex Feature
Input: implement feature for exporting data as CSV
Output:
```
# Feature Development: CSV Export
## Phase 1: UNDERSTAND
Analyzing the export feature request...
**Requirements:**
1. Export user data to CSV format
2. Support custom column selection
3. Handle large datasets
4. Provide download mechanism
**Assumptions:**
- Export from current view/filter
- UTF-8 encoding
- Include headers
**Clarifications needed:**
- Which data entities should be exportable?
- Any size limits?
- Should it support scheduled exports?
[Checkpoint: Please confirm requirements]
---
## Phase 2: EXPLORE
Searching for relevant code...
**Found:**
- Export utilities: `src/utils/export.ts`
- Data fetching: `src/services/data-service.ts`
- Similar feature: PDF export at `src/features/pdf-export/`
**Patterns to follow:**
- Use streaming for large files
- Progress callback pattern
- Error boundary wrapping
[Proceeding to planning...]
---
## Phase 3: PLAN
**Implementation Plan**
1. Create `src/features/csv-export/types.ts`
- Define export options interface
- Column configuration types
2. Create `src/features/csv-export/formatter.ts`
- CSV formatting logic
- Handle special characters
3. Create `src/features/csv-export/index.ts`
- Main export function
- Streaming support
4. Modify `src/components/DataTable.tsx`
- Add export button
- Wire up export action
**Risks:**
- Memory issues with large datasets → Use streaming
- Character encoding → Test with unicode
**Test Strategy:**
- Unit tests for formatter
- Integration test for full export
- Edge case: empty data, special characters
[Checkpoint: Approve plan to proceed?]
```

View File

@@ -0,0 +1,200 @@
---
id: review-pr
name: Review Pull Request
description: Perform a comprehensive code review on a pull request or set of changes
version: 1.0.0
triggers:
- /review-pr
- /review
- review pr
- review this pr
- review pull request
- code review
triggerType: command
autoTrigger: false
requiredTools:
- bash
- read
- grep
tags:
- git
- review
- workflow
---
## System Prompt
You are an expert code reviewer. You analyze changes thoroughly, looking for bugs, security issues, performance problems, and style inconsistencies. You provide constructive, specific feedback with concrete suggestions for improvement.
## Instructions
Follow this structured review process:
### Step 1: Gather Context
1. Get the diff: `git diff main...HEAD` (or specified base branch)
2. List changed files: `git diff --name-only main...HEAD`
3. Check commit history: `git log --oneline main...HEAD`
### Step 2: Review Categories
Analyze changes in these categories (only report findings with ≥80% confidence):
#### Security Review
- [ ] Input validation and sanitization
- [ ] SQL injection, XSS, command injection risks
- [ ] Authentication and authorization checks
- [ ] Sensitive data exposure
- [ ] Dependency vulnerabilities
#### Logic Review
- [ ] Edge cases and boundary conditions
- [ ] Error handling completeness
- [ ] Race conditions and concurrency issues
- [ ] Null/undefined handling
- [ ] Type safety
#### Performance Review
- [ ] Algorithmic complexity (O(n) analysis)
- [ ] Memory usage patterns
- [ ] Unnecessary re-renders (React)
- [ ] Database query efficiency
- [ ] Caching opportunities
#### Style Review
- [ ] Naming conventions
- [ ] Code organization
- [ ] Documentation completeness
- [ ] Test coverage
- [ ] Consistency with codebase patterns
### Step 3: Format Findings
For each finding, provide:
```
**[SEVERITY]** Category: Brief title
📍 Location: `file:line`
**Issue**: Description of the problem
**Suggestion**: How to fix it
**Example** (if applicable):
\`\`\`
// Before
problematic code
// After
improved code
\`\`\`
```
Severity levels:
- 🔴 **CRITICAL**: Must fix before merge (security, data loss)
- 🟠 **WARNING**: Should fix (bugs, significant issues)
- 🟡 **SUGGESTION**: Consider fixing (improvements)
- 🟢 **NITPICK**: Optional (style, preferences)
### Step 4: Summary
Provide an overall assessment:
- Number of findings by severity
- Overall quality rating (1-5 stars)
- Recommendation: Approve / Request Changes / Comment
## Examples
### Security Finding
Input: /review-pr
Output:
```
## Pull Request Review
### Findings
🔴 **CRITICAL** Security: SQL Injection vulnerability
📍 Location: `src/db/users.ts:45`
**Issue**: User input is directly interpolated into SQL query without parameterization.
**Suggestion**: Use parameterized queries or an ORM.
**Example**:
\`\`\`typescript
// Before
const query = `SELECT * FROM users WHERE id = ${userId}`;
// After
const query = 'SELECT * FROM users WHERE id = $1';
const result = await db.query(query, [userId]);
\`\`\`
---
🟠 **WARNING** Logic: Missing null check
📍 Location: `src/utils/format.ts:23`
**Issue**: `user.name` accessed without null check, will throw if user is undefined.
**Suggestion**: Add optional chaining or explicit null check.
---
### Summary
| Severity | Count |
|----------|-------|
| Critical | 1 |
| Warning | 1 |
| Suggestion | 0 |
**Rating**: ⭐⭐ (2/5)
**Recommendation**: 🔴 Request Changes
The SQL injection vulnerability must be fixed before this can be merged.
```
### Clean PR
Input: review this pr
Output:
```
## Pull Request Review
### Findings
🟡 **SUGGESTION** Style: Consider extracting magic number
📍 Location: `src/config/limits.ts:12`
**Issue**: The value `86400000` could be more readable.
**Suggestion**: Extract to named constant.
\`\`\`typescript
// Before
const CACHE_TTL = 86400000;
// After
const MS_PER_DAY = 24 * 60 * 60 * 1000;
const CACHE_TTL = MS_PER_DAY;
\`\`\`
---
### Summary
| Severity | Count |
|----------|-------|
| Critical | 0 |
| Warning | 0 |
| Suggestion | 1 |
**Rating**: ⭐⭐⭐⭐⭐ (5/5)
**Recommendation**: ✅ Approve
Clean implementation with good test coverage. The suggestion is optional.
```

View File

@@ -0,0 +1,366 @@
/**
* Apply Patch Execution
*
* Applies unified diff patches to files with fuzzy matching and rollback support.
*/
import fs from "fs/promises";
import { dirname, join, isAbsolute } from "path";
import {
PATCH_DEFAULTS,
PATCH_ERRORS,
PATCH_MESSAGES,
PATCH_TITLES,
} from "@constants/apply-patch";
import { parsePatch, validatePatch, getTargetPath, reversePatch } from "@tools/apply-patch/parser";
import { findHunkPosition, isHunkApplied, previewHunkApplication } from "@tools/apply-patch/matcher";
import type { ApplyPatchParams } from "@tools/apply-patch/params";
import type {
FilePatchResult,
HunkApplicationResult,
PatchRollback,
ParsedFilePatch,
} from "@/types/apply-patch";
import type { ToolContext, ToolResult } from "@tools/types";
// Rollback storage (in-memory for session)
// Keyed by absolute file path; entries are written by applyFilePatch before
// any write/unlink and consumed (single-use) by rollbackPatch. Not persisted.
const rollbackStore: Map<string, PatchRollback> = new Map();
/**
 * Execute the apply_patch tool.
 *
 * Parses `params.patch` as a unified diff, validates it, then applies each
 * file patch in turn, collecting per-file results into a formatted summary.
 *
 * @param params - Patch text plus options (targetFile, dryRun, fuzz, reverse).
 * @param ctx - Tool context; `ctx.workingDir` resolves relative patch paths.
 * @returns ToolResult whose `success` is true only when no file failed.
 */
export const executeApplyPatch = async (
  params: ApplyPatchParams,
  ctx: ToolContext,
): Promise<ToolResult> => {
  try {
    // Parse the patch text into structured per-file hunks
    const parsedPatch = parsePatch(params.patch);
    // Validate before touching the filesystem
    const validation = validatePatch(parsedPatch);
    if (!validation.valid) {
      return {
        success: false,
        title: PATCH_TITLES.FAILED,
        output: "",
        error: validation.errors.join("\n"),
      };
    }
    // Apply to each file
    const results: FilePatchResult[] = [];
    let totalPatched = 0;
    let totalFailed = 0;
    for (let filePatch of parsedPatch.files) {
      // Skip binary files; reported as a successful no-op so the rest of the
      // patch still applies (note: not counted in totalPatched)
      if (filePatch.isBinary) {
        results.push({
          success: true,
          filePath: getTargetPath(filePatch),
          hunksApplied: 0,
          hunksFailed: 0,
          hunkResults: [],
          error: PATCH_MESSAGES.SKIPPED_BINARY(getTargetPath(filePatch)),
        });
        continue;
      }
      // Reverse if requested (swaps additions/deletions to undo a patch)
      if (params.reverse) {
        filePatch = reversePatch(filePatch);
      }
      // Determine target file path; an explicit targetFile overrides headers
      const targetPath = params.targetFile ?? getTargetPath(filePatch);
      const absolutePath = isAbsolute(targetPath)
        ? targetPath
        : join(ctx.workingDir, targetPath);
      // Apply the file patch
      const result = await applyFilePatch(
        filePatch,
        absolutePath,
        {
          fuzz: params.fuzz ?? PATCH_DEFAULTS.FUZZ,
          dryRun: params.dryRun ?? false,
        },
      );
      results.push(result);
      if (result.success) {
        totalPatched++;
      } else {
        totalFailed++;
      }
    }
    // Build output
    const output = formatPatchResults(results, params.dryRun ?? false);
    // Determine overall success: any failed file fails the whole tool call
    const success = totalFailed === 0;
    const title = params.dryRun
      ? PATCH_TITLES.DRY_RUN
      : totalFailed === 0
        ? PATCH_TITLES.SUCCESS(totalPatched)
        : totalPatched > 0
          ? PATCH_TITLES.PARTIAL(totalPatched, totalFailed)
          : PATCH_TITLES.FAILED;
    return {
      success,
      title,
      output,
      error: success ? undefined : `${totalFailed} file(s) failed to patch`,
    };
  } catch (error) {
    // Parse errors and any unexpected failure surface here
    const message = error instanceof Error ? error.message : String(error);
    return {
      success: false,
      title: PATCH_TITLES.FAILED,
      output: "",
      error: PATCH_ERRORS.PARSE_FAILED(message),
    };
  }
};
/**
 * Apply a patch to a single file.
 *
 * Handles new files (start from empty content), deleted files (unlink), and
 * ordinary edits (per-hunk fuzzy application). Unless dryRun is set, the
 * pre-patch content is saved to the in-memory rollback store before any
 * write or delete.
 *
 * @param filePatch - Parsed hunks and flags for one file.
 * @param targetPath - Absolute path of the file to modify.
 * @param options - fuzz: context-line tolerance; dryRun: compute only.
 * @returns Per-file result with one entry per hunk.
 */
const applyFilePatch = async (
  filePatch: ParsedFilePatch,
  targetPath: string,
  options: { fuzz: number; dryRun: boolean },
): Promise<FilePatchResult> => {
  const hunkResults: HunkApplicationResult[] = [];
  let currentContent: string;
  let originalContent: string;
  try {
    // Handle new files
    if (filePatch.isNew) {
      currentContent = "";
      originalContent = "";
    } else {
      // Read original file
      try {
        currentContent = await fs.readFile(targetPath, "utf-8");
        originalContent = currentContent;
      } catch {
        // Missing/unreadable file: every hunk is reported as failed
        return {
          success: false,
          filePath: targetPath,
          hunksApplied: 0,
          hunksFailed: filePatch.hunks.length,
          hunkResults: [],
          error: PATCH_ERRORS.FILE_NOT_FOUND(targetPath),
        };
      }
    }
    // Handle deleted files
    if (filePatch.isDeleted) {
      if (!options.dryRun) {
        // Store rollback info so the deletion can be undone
        rollbackStore.set(targetPath, {
          filePath: targetPath,
          originalContent,
          patchedContent: "",
          timestamp: Date.now(),
        });
        await fs.unlink(targetPath);
      }
      // A deletion is modeled as a single successfully-applied "hunk"
      return {
        success: true,
        filePath: targetPath,
        hunksApplied: 1,
        hunksFailed: 0,
        hunkResults: [
          {
            success: true,
            hunkIndex: 0,
            appliedAt: 0,
          },
        ],
        newContent: "",
      };
    }
    // Apply each hunk
    let hunksApplied = 0;
    let hunksFailed = 0;
    for (let i = 0; i < filePatch.hunks.length; i++) {
      const hunk = filePatch.hunks[i];
      // Check if already applied — counted as success (idempotent re-apply)
      if (isHunkApplied(currentContent, hunk, { fuzz: options.fuzz })) {
        hunkResults.push({
          success: true,
          hunkIndex: i,
          appliedAt: hunk.oldStart - 1,
        });
        hunksApplied++;
        continue;
      }
      // Find position with fuzzy matching (lines may have shifted)
      const position = findHunkPosition(currentContent, hunk, { fuzz: options.fuzz });
      if (!position.found) {
        hunkResults.push({
          success: false,
          hunkIndex: i,
          error: PATCH_ERRORS.FUZZY_MATCH_FAILED(i),
        });
        hunksFailed++;
        continue;
      }
      // Apply the hunk in memory
      const preview = previewHunkApplication(currentContent, hunk, position.lineNumber);
      if (!preview.success) {
        hunkResults.push({
          success: false,
          hunkIndex: i,
          error: preview.error ?? PATCH_ERRORS.HUNK_FAILED(i, "unknown"),
        });
        hunksFailed++;
        continue;
      }
      // Later hunks are matched against the already-patched content
      currentContent = preview.preview.join("\n");
      hunksApplied++;
      hunkResults.push({
        success: true,
        hunkIndex: i,
        appliedAt: position.lineNumber,
        fuzzyOffset: position.offset !== 0 ? position.offset : undefined,
      });
    }
    // Write the file if not dry run (only when at least one hunk changed it)
    if (!options.dryRun && hunksApplied > 0) {
      // Store rollback info
      rollbackStore.set(targetPath, {
        filePath: targetPath,
        originalContent,
        patchedContent: currentContent,
        timestamp: Date.now(),
      });
      // Ensure directory exists (new files may live in new directories)
      await fs.mkdir(dirname(targetPath), { recursive: true });
      // Write patched content
      await fs.writeFile(targetPath, currentContent, "utf-8");
    }
    return {
      success: hunksFailed === 0,
      filePath: targetPath,
      hunksApplied,
      hunksFailed,
      hunkResults,
      newContent: currentContent,
    };
  } catch (error) {
    // Filesystem failure (mkdir/write/unlink): report all hunks as failed
    const message = error instanceof Error ? error.message : String(error);
    return {
      success: false,
      filePath: targetPath,
      hunksApplied: 0,
      hunksFailed: filePatch.hunks.length,
      hunkResults,
      error: PATCH_ERRORS.WRITE_FAILED(targetPath, message),
    };
  }
};
/**
 * Render per-file patch results as a human-readable report.
 *
 * One line per file with a ✓/✗ marker, followed by hunk counts, fuzzy-offset
 * notes, and any error; closes with a dry-run or rollback-availability note.
 */
const formatPatchResults = (
  results: FilePatchResult[],
  dryRun: boolean,
): string => {
  const report: string[] = [];
  for (const fileResult of results) {
    const marker = fileResult.success ? "✓" : "✗";
    report.push(`${marker} ${fileResult.filePath}`);
    const { hunksApplied, hunksFailed, hunkResults } = fileResult;
    if (hunksApplied > 0 || hunksFailed > 0) {
      report.push(`  ${hunksApplied} hunk(s) applied, ${hunksFailed} failed`);
    }
    // Note any hunks that only applied after a fuzzy offset
    for (const hunkResult of hunkResults) {
      if (hunkResult.fuzzyOffset) {
        report.push(
          `  ${PATCH_MESSAGES.FUZZY_APPLIED(hunkResult.hunkIndex, hunkResult.fuzzyOffset)}`,
        );
      }
    }
    if (fileResult.error) {
      report.push(`  Error: ${fileResult.error}`);
    }
  }
  if (dryRun) {
    report.push("", "(dry run - no changes were made)");
  } else if (results.some((r) => r.success)) {
    report.push("", PATCH_MESSAGES.ROLLBACK_AVAILABLE);
  }
  return report.join("\n");
};
/**
 * Rollback a patched file to its pre-patch content.
 *
 * @param filePath - Absolute path used as the rollback-store key.
 * @returns true if a rollback entry existed and was restored; false otherwise.
 */
export const rollbackPatch = async (filePath: string): Promise<boolean> => {
  const rollback = rollbackStore.get(filePath);
  if (!rollback) {
    return false;
  }
  try {
    if (rollback.originalContent === "") {
      // Was a new file, delete it
      // NOTE(review): empty originalContent is assumed to mean the patch
      // created the file. A pre-existing *empty* file would also hit this
      // branch and be deleted instead of restored — confirm intended.
      await fs.unlink(filePath);
    } else {
      await fs.writeFile(filePath, rollback.originalContent, "utf-8");
    }
    // Entries are single-use: remove after a successful restore
    rollbackStore.delete(filePath);
    return true;
  } catch {
    // fs failure (e.g. file already gone): report failure, keep the entry
    return false;
  }
};
/**
 * List the file paths that currently have rollback snapshots available.
 */
export const getAvailableRollbacks = (): string[] => {
  return [...rollbackStore.keys()];
};
/**
 * Drop every stored rollback snapshot for this session.
 */
export const clearRollbacks = (): void => {
  rollbackStore.clear();
};

View File

@@ -0,0 +1,60 @@
/**
* Apply Patch Tool
*
* Applies unified diff patches to files with fuzzy matching support.
*/
import type { ToolDefinition } from "@tools/types";
import { applyPatchParams } from "@tools/apply-patch/params";
import { executeApplyPatch } from "@tools/apply-patch/execute";
export { applyPatchParams } from "@tools/apply-patch/params";
export { executeApplyPatch, rollbackPatch, getAvailableRollbacks, clearRollbacks } from "@tools/apply-patch/execute";
export { parsePatch, validatePatch, getTargetPath, reversePatch } from "@tools/apply-patch/parser";
export { findHunkPosition, isHunkApplied, previewHunkApplication } from "@tools/apply-patch/matcher";
/**
* Tool description
*/
const APPLY_PATCH_DESCRIPTION = `Apply a unified diff patch to one or more files.
Use this tool to:
- Apply changes from a diff/patch
- Update files based on patch content
- Preview changes before applying (dry run)
Parameters:
- patch: The unified diff content (required)
- targetFile: Override the target file path (optional)
- dryRun: Preview without applying changes (default: false)
- fuzz: Context line tolerance 0-3 (default: 2)
- reverse: Apply patch in reverse to undo changes (default: false)
The tool supports:
- Standard unified diff format (git diff, diff -u)
- Fuzzy context matching when lines have shifted
- Creating new files
- Deleting files
- Rollback on failure
Example patch format:
\`\`\`
--- a/src/example.ts
+++ b/src/example.ts
@@ -10,6 +10,7 @@ function example() {
const a = 1;
const b = 2;
+ const c = 3;
return a + b;
}
\`\`\``;
/**
 * Apply patch tool definition.
 *
 * Registers the `apply_patch` tool: zod-validated parameters plus the
 * execution handler that parses and applies unified diffs.
 */
export const applyPatchTool: ToolDefinition = {
  name: "apply_patch",
  description: APPLY_PATCH_DESCRIPTION,
  parameters: applyPatchParams,
  execute: executeApplyPatch,
};

View File

@@ -0,0 +1,304 @@
/**
* Fuzzy Matcher
*
* Finds patch context in target file with fuzzy matching support.
*/
import { PATCH_DEFAULTS } from "@constants/apply-patch";
import type {
PatchHunk,
FuzzyMatchResult,
ContextMatchOptions,
} from "@/types/apply-patch";
/**
 * Default match options.
 *
 * Sourced from PATCH_DEFAULTS so the matcher's defaults stay in sync with
 * the tool's configured defaults.
 */
const DEFAULT_MATCH_OPTIONS: ContextMatchOptions = {
  fuzz: PATCH_DEFAULTS.FUZZ,
  ignoreWhitespace: PATCH_DEFAULTS.IGNORE_WHITESPACE,
  ignoreCase: PATCH_DEFAULTS.IGNORE_CASE,
};
/**
 * Canonicalize a line for comparison according to the match options:
 * optionally collapse whitespace runs (and trim), optionally lowercase.
 */
const normalizeLine = (
  line: string,
  options: ContextMatchOptions,
): string => {
  const collapsed = options.ignoreWhitespace
    ? line.replace(/\s+/g, " ").trim()
    : line;
  return options.ignoreCase ? collapsed.toLowerCase() : collapsed;
};
/**
 * Collect the hunk lines expected to exist in the original file
 * (context and deletion lines), preserving their order.
 */
const extractOriginalLines = (hunk: PatchHunk): string[] => {
  const original: string[] = [];
  for (const line of hunk.lines) {
    if (line.type === "context" || line.type === "deletion") {
      original.push(line.content);
    }
  }
  return original;
};
/**
 * Compare the hunk's original lines against the file at a candidate start
 * line. A position "matches" when at least (total - fuzz) lines agree,
 * with a minimum of one agreeing line.
 *
 * @returns whether the position matches plus the fraction of agreeing lines.
 */
const checkMatchAtPosition = (
  fileLines: string[],
  originalLines: string[],
  startLine: number,
  options: ContextMatchOptions,
): { matches: boolean; confidence: number } => {
  const total = originalLines.length;
  // Not enough file left to hold the hunk at this position
  if (startLine + total > fileLines.length) {
    return { matches: false, confidence: 0 };
  }
  let agreed = 0;
  originalLines.forEach((patchLine, i) => {
    const fileLine = normalizeLine(fileLines[startLine + i], options);
    if (fileLine === normalizeLine(patchLine, options)) {
      agreed++;
    }
  });
  const confidence = total > 0 ? agreed / total : 0;
  // Allow up to `fuzz` mismatching lines
  const matches = agreed >= Math.max(1, total - options.fuzz);
  return { matches, confidence };
};
/**
 * Find the best match position for a hunk in file content.
 *
 * Tries the hunk's declared position first; on mismatch, scans outward
 * (alternating before/after) up to fuzz * CONTEXT_LINES lines from it,
 * keeping the highest-confidence matching position. As a last resort the
 * declared position is accepted when more than half its lines matched.
 *
 * @returns found flag, 0-indexed line number, offset from the declared
 *   position, and confidence (fraction of matching lines).
 */
export const findHunkPosition = (
  fileContent: string,
  hunk: PatchHunk,
  options: Partial<ContextMatchOptions> = {},
): FuzzyMatchResult => {
  const fullOptions: ContextMatchOptions = {
    ...DEFAULT_MATCH_OPTIONS,
    ...options,
  };
  const fileLines = fileContent.split("\n");
  const originalLines = extractOriginalLines(hunk);
  // If hunk has no lines to match (pure insertion), use the header's line
  // number directly, clamped to the end of the file
  if (originalLines.length === 0) {
    const targetLine = Math.min(hunk.oldStart - 1, fileLines.length);
    return {
      found: true,
      lineNumber: targetLine,
      offset: 0,
      confidence: 1,
    };
  }
  // Expected position (0-indexed; diff headers are 1-indexed)
  const expectedLine = hunk.oldStart - 1;
  // First, try exact position
  const exactMatch = checkMatchAtPosition(
    fileLines,
    originalLines,
    expectedLine,
    fullOptions,
  );
  if (exactMatch.matches && exactMatch.confidence === 1) {
    return {
      found: true,
      lineNumber: expectedLine,
      offset: 0,
      confidence: exactMatch.confidence,
    };
  }
  // Search within fuzz range, widening symmetrically around expectedLine
  const maxOffset = fullOptions.fuzz * PATCH_DEFAULTS.CONTEXT_LINES;
  let bestMatch: FuzzyMatchResult | null = null;
  for (let offset = 1; offset <= maxOffset; offset++) {
    // Try before expected position
    const beforePos = expectedLine - offset;
    if (beforePos >= 0) {
      const beforeMatch = checkMatchAtPosition(
        fileLines,
        originalLines,
        beforePos,
        fullOptions,
      );
      if (beforeMatch.matches) {
        if (!bestMatch || beforeMatch.confidence > bestMatch.confidence) {
          bestMatch = {
            found: true,
            lineNumber: beforePos,
            offset: -offset,
            confidence: beforeMatch.confidence,
          };
        }
      }
    }
    // Try after expected position
    const afterPos = expectedLine + offset;
    if (afterPos < fileLines.length) {
      const afterMatch = checkMatchAtPosition(
        fileLines,
        originalLines,
        afterPos,
        fullOptions,
      );
      if (afterMatch.matches) {
        if (!bestMatch || afterMatch.confidence > bestMatch.confidence) {
          bestMatch = {
            found: true,
            lineNumber: afterPos,
            offset: offset,
            confidence: afterMatch.confidence,
          };
        }
      }
    }
    // If we found a perfect match, stop searching
    if (bestMatch && bestMatch.confidence === 1) {
      break;
    }
  }
  // Return best match if found
  if (bestMatch) {
    return bestMatch;
  }
  // If exact position had a partial (majority) match, return it
  if (exactMatch.confidence > 0.5) {
    return {
      found: true,
      lineNumber: expectedLine,
      offset: 0,
      confidence: exactMatch.confidence,
    };
  }
  return {
    found: false,
    lineNumber: -1,
    offset: 0,
    confidence: 0,
  };
};
/**
* Check if a hunk is already applied (deletions are gone, additions are present)
*/
export const isHunkApplied = (
fileContent: string,
hunk: PatchHunk,
options: Partial<ContextMatchOptions> = {},
): boolean => {
const fullOptions: ContextMatchOptions = {
...DEFAULT_MATCH_OPTIONS,
...options,
};
const fileLines = fileContent.split("\n");
// Check if additions are present and deletions are not
let additionsPresent = 0;
let deletionsAbsent = 0;
for (const line of hunk.lines) {
const normalizedContent = normalizeLine(line.content, fullOptions);
if (line.type === "addition") {
const found = fileLines.some(
(fl) => normalizeLine(fl, fullOptions) === normalizedContent,
);
if (found) additionsPresent++;
}
if (line.type === "deletion") {
const found = fileLines.some(
(fl) => normalizeLine(fl, fullOptions) === normalizedContent,
);
if (!found) deletionsAbsent++;
}
}
const totalAdditions = hunk.lines.filter((l) => l.type === "addition").length;
const totalDeletions = hunk.lines.filter((l) => l.type === "deletion").length;
// Consider applied if most additions are present and most deletions are absent
const additionsMatch =
totalAdditions === 0 || additionsPresent >= totalAdditions * 0.8;
const deletionsMatch =
totalDeletions === 0 || deletionsAbsent >= totalDeletions * 0.8;
return additionsMatch && deletionsMatch;
};
/**
 * Compute the file content that would result from applying a hunk at the
 * given (0-indexed) line position, without touching the filesystem.
 *
 * Context and deletion lines consume lines from the original; context and
 * addition lines are emitted into the result.
 *
 * @param fileContent - Current file content.
 * @param hunk - The hunk to apply.
 * @param position - Line index at which the hunk is anchored.
 * @returns `success: true` with the resulting lines, or `success: false`
 *   with an error when the hunk would consume lines beyond the end of the
 *   file (previously this was silently accepted and produced wrong output).
 */
export const previewHunkApplication = (
  fileContent: string,
  hunk: PatchHunk,
  position: number,
): { success: boolean; preview: string[]; error?: string } => {
  const fileLines = fileContent.split("\n");
  // Lines the hunk consumes from the original (context + deletions)
  const originalLinesConsumed = hunk.lines.filter(
    (line) => line.type === "context" || line.type === "deletion",
  ).length;
  // Guard: the hunk must fit entirely within the file at this position
  if (position < 0 || position + originalLinesConsumed > fileLines.length) {
    return {
      success: false,
      preview: [],
      error: `Hunk does not fit at line ${position}: consumes ${originalLinesConsumed} line(s) but only ${fileLines.length - position} remain`,
    };
  }
  // Unchanged lines before the hunk
  const resultLines: string[] = fileLines.slice(0, position);
  // Apply hunk transformations: keep context, emit additions, drop deletions
  for (const line of hunk.lines) {
    if (line.type === "context" || line.type === "addition") {
      resultLines.push(line.content);
    }
  }
  // Unchanged lines after the consumed region
  resultLines.push(...fileLines.slice(position + originalLinesConsumed));
  return {
    success: true,
    preview: resultLines,
  };
};

View File

@@ -0,0 +1,43 @@
/**
* Apply Patch Tool Parameters
*/
import { z } from "zod";
import { PATCH_DEFAULTS } from "@constants/apply-patch";
/**
 * Zod schema for apply_patch tool parameters.
 *
 * `patch` is the only required field; the rest tune how the patch is
 * applied (dry run, fuzzy-match tolerance, reverse application).
 */
export const applyPatchParams = z.object({
  patch: z
    .string()
    .describe("The unified diff patch content to apply"),
  // Overrides the path taken from the ---/+++ headers
  targetFile: z
    .string()
    .optional()
    .describe("Override the target file path from the patch header"),
  dryRun: z
    .boolean()
    .optional()
    .default(false)
    .describe("Validate and preview changes without actually applying them"),
  // Higher fuzz tolerates more shifted/mismatched context lines
  fuzz: z
    .number()
    .int()
    .min(0)
    .max(PATCH_DEFAULTS.MAX_FUZZ)
    .optional()
    .default(PATCH_DEFAULTS.FUZZ)
    .describe(`Context line tolerance for fuzzy matching (0-${PATCH_DEFAULTS.MAX_FUZZ})`),
  reverse: z
    .boolean()
    .optional()
    .default(false)
    .describe("Apply the patch in reverse (undo the changes)"),
});
// Parameter type inferred from the schema, used by the execute handler
export type ApplyPatchParams = z.infer<typeof applyPatchParams>;

View File

@@ -0,0 +1,387 @@
/**
* Patch Parser
*
* Parses unified diff format patches into structured data.
*/
import {
PATCH_PATTERNS,
LINE_PREFIXES,
SPECIAL_PATHS,
PATCH_ERRORS,
} from "@constants/apply-patch";
import type {
ParsedPatch,
ParsedFilePatch,
PatchHunk,
PatchLine,
PatchLineType,
PatchValidationResult,
} from "@/types/apply-patch";
/**
 * Parse a unified diff patch string.
 *
 * Walks the patch line-by-line, recognizing git diff headers, ---/+++ file
 * headers, mode/rename/binary markers, @@ hunk headers, and hunk body
 * lines, accumulating them into per-file structures.
 *
 * @param patchContent - Raw unified diff text (git diff or diff -u output).
 * @returns The parsed files plus the raw patch text for reference.
 */
export const parsePatch = (patchContent: string): ParsedPatch => {
  const lines = patchContent.split("\n");
  const files: ParsedFilePatch[] = [];
  let currentFile: ParsedFilePatch | null = null;
  let currentHunk: PatchHunk | null = null;
  let lineIndex = 0;
  while (lineIndex < lines.length) {
    const line = lines[lineIndex];
    // Git diff header — starts a new file section
    const gitDiffMatch = line.match(PATCH_PATTERNS.GIT_DIFF);
    if (gitDiffMatch) {
      if (currentFile && (currentFile.hunks.length > 0 || currentFile.isBinary)) {
        files.push(currentFile);
      }
      currentFile = createEmptyFilePatch(gitDiffMatch[1], gitDiffMatch[2]);
      currentHunk = null;
      lineIndex++;
      continue;
    }
    // File header old ("--- a/path"); /dev/null means the file is new
    const oldHeaderMatch = line.match(PATCH_PATTERNS.FILE_HEADER_OLD);
    if (oldHeaderMatch) {
      if (!currentFile) {
        currentFile = createEmptyFilePatch("", "");
      }
      currentFile.oldPath = cleanPath(oldHeaderMatch[1]);
      if (currentFile.oldPath === SPECIAL_PATHS.DEV_NULL) {
        currentFile.isNew = true;
      }
      lineIndex++;
      continue;
    }
    // File header new ("+++ b/path"); /dev/null means the file is deleted
    const newHeaderMatch = line.match(PATCH_PATTERNS.FILE_HEADER_NEW);
    if (newHeaderMatch) {
      if (!currentFile) {
        currentFile = createEmptyFilePatch("", "");
      }
      currentFile.newPath = cleanPath(newHeaderMatch[1]);
      if (currentFile.newPath === SPECIAL_PATHS.DEV_NULL) {
        currentFile.isDeleted = true;
      }
      lineIndex++;
      continue;
    }
    // Index line (skip — blob hashes are not needed)
    if (PATCH_PATTERNS.INDEX_LINE.test(line)) {
      lineIndex++;
      continue;
    }
    // Binary file marker
    if (PATCH_PATTERNS.BINARY_FILE.test(line)) {
      if (currentFile) {
        currentFile.isBinary = true;
      }
      lineIndex++;
      continue;
    }
    // New file mode
    if (PATCH_PATTERNS.NEW_FILE.test(line)) {
      if (currentFile) {
        currentFile.isNew = true;
      }
      lineIndex++;
      continue;
    }
    // Deleted file mode
    if (PATCH_PATTERNS.DELETED_FILE.test(line)) {
      if (currentFile) {
        currentFile.isDeleted = true;
      }
      lineIndex++;
      continue;
    }
    // Rename from
    const renameFromMatch = line.match(PATCH_PATTERNS.RENAME_FROM);
    if (renameFromMatch) {
      if (currentFile) {
        currentFile.isRenamed = true;
        currentFile.oldPath = cleanPath(renameFromMatch[1]);
      }
      lineIndex++;
      continue;
    }
    // Rename to
    const renameToMatch = line.match(PATCH_PATTERNS.RENAME_TO);
    if (renameToMatch) {
      if (currentFile) {
        currentFile.newPath = cleanPath(renameToMatch[1]);
      }
      lineIndex++;
      continue;
    }
    // Hunk header ("@@ -a,b +c,d @@"); omitted counts default to 1
    const hunkMatch = line.match(PATCH_PATTERNS.HUNK_HEADER);
    if (hunkMatch) {
      if (currentHunk && currentFile) {
        currentFile.hunks.push(currentHunk);
      }
      currentHunk = {
        oldStart: parseInt(hunkMatch[1], 10),
        oldLines: hunkMatch[2] ? parseInt(hunkMatch[2], 10) : 1,
        newStart: parseInt(hunkMatch[3], 10),
        newLines: hunkMatch[4] ? parseInt(hunkMatch[4], 10) : 1,
        lines: [],
        header: line,
      };
      lineIndex++;
      continue;
    }
    // Patch lines (context, addition, deletion) belong to the open hunk
    if (currentHunk) {
      const patchLine = parsePatchLine(line, currentHunk);
      if (patchLine) {
        currentHunk.lines.push(patchLine);
      }
    }
    lineIndex++;
  }
  // Push final hunk and file
  // NOTE(review): non-binary files with zero hunks (e.g. pure renames or
  // empty new files) are dropped by this condition — confirm intended.
  if (currentHunk && currentFile) {
    currentFile.hunks.push(currentHunk);
  }
  if (currentFile && (currentFile.hunks.length > 0 || currentFile.isBinary)) {
    files.push(currentFile);
  }
  return {
    files,
    rawPatch: patchContent,
  };
};
/**
 * Build a fresh ParsedFilePatch with all flags cleared, no hunks, and the
 * git "a/"/"b/" prefixes stripped from both paths.
 */
const createEmptyFilePatch = (oldPath: string, newPath: string): ParsedFilePatch => {
  return {
    oldPath: cleanPath(oldPath),
    newPath: cleanPath(newPath),
    hunks: [],
    isBinary: false,
    isNew: false,
    isDeleted: false,
    isRenamed: false,
  };
};
/**
 * Strip the conventional git-diff "a/" or "b/" prefix from a path, if present.
 */
const cleanPath = (path: string): string => {
  const hasDiffPrefix =
    path.startsWith(SPECIAL_PATHS.A_PREFIX) ||
    path.startsWith(SPECIAL_PATHS.B_PREFIX);
  // Both prefixes are two characters long
  return hasDiffPrefix ? path.slice(2) : path;
};
/**
 * Classify a single raw patch line as context/addition/deletion.
 *
 * Returns null for lines with no content (the "\ No newline" marker and
 * empty lines). Lines with an unrecognized prefix are kept verbatim as
 * context so malformed patches degrade gracefully rather than erroring.
 */
const parsePatchLine = (line: string, _hunk: PatchHunk): PatchLine | null => {
  if (PATCH_PATTERNS.NO_NEWLINE.test(line) || line === "") {
    return null;
  }
  const typeMap: Record<string, PatchLineType> = {
    [LINE_PREFIXES.CONTEXT]: "context",
    [LINE_PREFIXES.ADDITION]: "addition",
    [LINE_PREFIXES.DELETION]: "deletion",
  };
  const type = typeMap[line[0]];
  if (type !== undefined) {
    return { type, content: line.slice(1) };
  }
  // Unknown prefix: keep the whole line (prefix included) as context
  return { type: "context", content: line };
};
/**
 * Sanity-check a parsed patch before applying it.
 *
 * Errors block application (no files, missing target path); warnings are
 * informational only (binary files, hunk line-count mismatches).
 */
export const validatePatch = (patch: ParsedPatch): PatchValidationResult => {
  const errors: string[] = [];
  const warnings: string[] = [];
  let hunkCount = 0;
  if (patch.files.length === 0) {
    return {
      valid: false,
      errors: [PATCH_ERRORS.INVALID_PATCH],
      warnings,
      fileCount: 0,
      hunkCount: 0,
    };
  }
  for (const file of patch.files) {
    // Binary patches are never applied; surface as a warning only
    if (file.isBinary) {
      warnings.push(PATCH_ERRORS.BINARY_NOT_SUPPORTED);
      continue;
    }
    if (!file.newPath && !file.isDeleted) {
      errors.push("Missing target path for file");
    }
    for (const hunk of file.hunks) {
      hunkCount++;
      // Tally line kinds to cross-check against the header's declared counts
      const contextCount = hunk.lines.filter((l) => l.type === "context").length;
      const additionCount = hunk.lines.filter((l) => l.type === "addition").length;
      const deletionCount = hunk.lines.filter((l) => l.type === "deletion").length;
      const expectedOld = contextCount + deletionCount;
      const expectedNew = contextCount + additionCount;
      if (expectedOld !== hunk.oldLines) {
        warnings.push(
          `Hunk line count mismatch: expected ${hunk.oldLines} old lines, found ${expectedOld}`,
        );
      }
      if (expectedNew !== hunk.newLines) {
        warnings.push(
          `Hunk line count mismatch: expected ${hunk.newLines} new lines, found ${expectedNew}`,
        );
      }
    }
  }
  return {
    valid: errors.length === 0,
    errors,
    warnings,
    fileCount: patch.files.length,
    hunkCount,
  };
};
/**
 * Resolve which on-disk path a file patch targets.
 *
 * New files use the new path, deletions use the old path; everything else
 * prefers the new path and falls back to the old one when it is empty.
 */
export const getTargetPath = (filePatch: ParsedFilePatch): string => {
  if (filePatch.isNew) {
    return filePatch.newPath;
  }
  if (filePatch.isDeleted) {
    return filePatch.oldPath;
  }
  return filePatch.newPath !== "" ? filePatch.newPath : filePatch.oldPath;
};
/**
 * Heuristic: a patch looks reversed when its "added" lines already exist in
 * the file while its "deleted" lines mostly do not (added count must exceed
 * twice the deleted-present count before the patch is declared reversed).
 */
export const isPatchReversed = (
  patch: ParsedFilePatch,
  fileContent: string,
): boolean => {
  const fileLines = new Set(fileContent.split("\n"));
  const allLines = patch.hunks.flatMap((hunk) => hunk.lines);
  const presentOfType = (kind: string): number =>
    allLines.filter((l) => l.type === kind && fileLines.has(l.content)).length;
  const addedPresent = presentOfType("addition");
  const deletedPresent = presentOfType("deletion");
  return addedPresent > deletedPresent * 2;
};
/**
 * Produce the inverse of a file patch: paths, new/deleted flags, hunk
 * ranges, and addition/deletion line types are all swapped so that applying
 * the result undoes the original patch.
 */
export const reversePatch = (patch: ParsedFilePatch): ParsedFilePatch => {
  const flippedHunks = patch.hunks.map((hunk) => {
    const flippedLines = hunk.lines.map((line) => {
      const type =
        line.type === "addition"
          ? ("deletion" as const)
          : line.type === "deletion"
            ? ("addition" as const)
            : line.type;
      return { ...line, type };
    });
    return {
      ...hunk,
      oldStart: hunk.newStart,
      oldLines: hunk.newLines,
      newStart: hunk.oldStart,
      newLines: hunk.oldLines,
      lines: flippedLines,
    };
  });
  return {
    ...patch,
    oldPath: patch.newPath,
    newPath: patch.oldPath,
    isNew: patch.isDeleted,
    isDeleted: patch.isNew,
    hunks: flippedHunks,
  };
};

View File

@@ -12,7 +12,10 @@ export { todoReadTool } from "@tools/todo-read";
export { globToolDefinition } from "@tools/glob/definition";
export { grepToolDefinition } from "@tools/grep/definition";
export { webSearchTool } from "@tools/web-search";
export { webFetchTool } from "@tools/web-fetch";
export { multiEditTool } from "@tools/multi-edit";
export { lspTool } from "@tools/lsp";
export { applyPatchTool } from "@tools/apply-patch";
import type { ToolDefinition, FunctionDefinition } from "@tools/types";
import { toolToFunction } from "@tools/types";
@@ -25,7 +28,10 @@ import { todoReadTool } from "@tools/todo-read";
import { globToolDefinition } from "@tools/glob/definition";
import { grepToolDefinition } from "@tools/grep/definition";
import { webSearchTool } from "@tools/web-search";
import { webFetchTool } from "@tools/web-fetch";
import { multiEditTool } from "@tools/multi-edit";
import { lspTool } from "@tools/lsp";
import { applyPatchTool } from "@tools/apply-patch";
import {
isMCPTool,
executeMCPTool,
@@ -44,12 +50,15 @@ export const tools: ToolDefinition[] = [
readTool,
writeTool,
editTool,
multiEditTool,
globToolDefinition,
grepToolDefinition,
todoWriteTool,
todoReadTool,
webSearchTool,
webFetchTool,
lspTool,
applyPatchTool,
];
// Tools that are read-only (allowed in chat mode)
@@ -59,6 +68,7 @@ const READ_ONLY_TOOLS = new Set([
"grep",
"todo_read",
"web_search",
"web_fetch",
"lsp",
]);

View File

@@ -0,0 +1,343 @@
/**
* MultiEdit Tool Execution
*
* Performs batch file editing with atomic transactions
*/
import fs from "fs/promises";
import path from "path";
import {
MULTI_EDIT_DEFAULTS,
MULTI_EDIT_MESSAGES,
MULTI_EDIT_TITLES,
MULTI_EDIT_DESCRIPTION,
} from "@constants/multi-edit";
import { isFileOpAllowed, promptFilePermission } from "@services/permissions";
import { formatDiff, generateDiff } from "@utils/diff";
import { multiEditParams } from "@tools/multi-edit/params";
import type { ToolDefinition, ToolContext, ToolResult } from "@/types/tools";
import type { EditItem, MultiEditParams } from "@tools/multi-edit/params";
// Snapshot of a file's content taken before editing (for restore on failure)
interface FileBackup {
  path: string;
  content: string;
}
// Outcome of pre-flight validation for a single edit; carries the file
// content on success so the caller does not need a second read
interface EditValidation {
  valid: boolean;
  error?: string;
  fileContent?: string;
}
// Per-file result of applying one edit, with diff stats for reporting
interface EditResult {
  path: string;
  success: boolean;
  diff?: string;
  additions?: number;
  deletions?: number;
  error?: string;
}
// Build a failed ToolResult carrying the standard multi-edit failure title.
const createErrorResult = (error: string): ToolResult => {
  return {
    success: false,
    title: MULTI_EDIT_TITLES.FAILED,
    output: "",
    error,
  };
};
/**
 * Aggregate per-edit results into a single ToolResult.
 *
 * Successful edits contribute their diffs (separated by "---"); failed
 * edits are listed in a trailing "Failed Edits" section. The result is only
 * marked successful when every edit applied.
 *
 * Fix: sections are now assembled conditionally, so a batch where every
 * edit failed no longer produces output that starts with blank lines from
 * an empty diff section.
 *
 * @param results - One entry per attempted edit.
 * @param totalEdits - Number of edits originally requested (for metadata).
 */
const createSuccessResult = (
  results: EditResult[],
  totalEdits: number,
): ToolResult => {
  const successful = results.filter((r) => r.success);
  const failed = results.filter((r) => !r.success);
  const diffOutput = successful
    .map((r) => `## ${path.basename(r.path)}\n\n${r.diff}`)
    .join("\n\n---\n\n");
  const totalAdditions = successful.reduce((sum, r) => sum + (r.additions ?? 0), 0);
  const totalDeletions = successful.reduce((sum, r) => sum + (r.deletions ?? 0), 0);
  const title =
    failed.length > 0
      ? MULTI_EDIT_TITLES.PARTIAL(successful.length, failed.length)
      : MULTI_EDIT_TITLES.SUCCESS(successful.length);
  // Assemble output sections, skipping empty ones
  const sections: string[] = [];
  if (diffOutput) {
    sections.push(diffOutput);
  }
  if (failed.length > 0) {
    sections.push(
      "## Failed Edits\n\n" +
        failed.map((r) => `- ${r.path}: ${r.error}`).join("\n"),
    );
  }
  return {
    success: failed.length === 0,
    title,
    output: sections.join("\n\n"),
    metadata: {
      totalEdits,
      successful: successful.length,
      failed: failed.length,
      totalAdditions,
      totalDeletions,
    },
  };
};
/**
 * Validate a single edit against the file currently on disk.
 *
 * Checks, in order: the target exists and is a regular file, it is under
 * the size cap, `old_string` occurs in it, and the occurrence is unique.
 * On success the file content is returned so the apply phase can reuse it
 * without a second read.
 */
const validateEdit = async (
  edit: EditItem,
  workingDir: string,
): Promise<EditValidation> => {
  // Relative paths are resolved against the tool's working directory.
  const fullPath = path.isAbsolute(edit.file_path)
    ? edit.file_path
    : path.join(workingDir, edit.file_path);
  try {
    const stat = await fs.stat(fullPath);
    if (!stat.isFile()) {
      return { valid: false, error: `Not a file: ${edit.file_path}` };
    }
    if (stat.size > MULTI_EDIT_DEFAULTS.MAX_FILE_SIZE) {
      return { valid: false, error: MULTI_EDIT_MESSAGES.FILE_TOO_LARGE(edit.file_path) };
    }
    const content = await fs.readFile(fullPath, "utf-8");
    // Check if old_string exists in the file at all.
    if (!content.includes(edit.old_string)) {
      const preview = edit.old_string.slice(0, 50);
      return {
        valid: false,
        error: MULTI_EDIT_MESSAGES.OLD_STRING_NOT_FOUND(edit.file_path, preview),
      };
    }
    // Check uniqueness: split() counts literal (non-regex) occurrences.
    const occurrences = content.split(edit.old_string).length - 1;
    if (occurrences > 1) {
      return {
        valid: false,
        error: MULTI_EDIT_MESSAGES.OLD_STRING_NOT_UNIQUE(edit.file_path, occurrences),
      };
    }
    return { valid: true, fileContent: content };
  } catch (error) {
    // Missing files get a dedicated message; other fs errors pass through.
    if ((error as NodeJS.ErrnoException).code === "ENOENT") {
      return { valid: false, error: MULTI_EDIT_MESSAGES.FILE_NOT_FOUND(edit.file_path) };
    }
    const message = error instanceof Error ? error.message : String(error);
    return { valid: false, error: message };
  }
};
/**
 * Ensure the user has granted Edit permission for every target file.
 * Files not already allowed trigger an interactive prompt, unless the
 * whole run is auto-approved.
 */
const checkPermissions = async (
  edits: EditItem[],
  workingDir: string,
  autoApprove: boolean,
): Promise<{ allowed: boolean; denied: string[] }> => {
  const denied: string[] = [];

  for (const item of edits) {
    const absolutePath = path.isAbsolute(item.file_path)
      ? item.file_path
      : path.join(workingDir, item.file_path);

    // Skip the prompt when auto-approved or previously allowed.
    if (autoApprove || isFileOpAllowed("Edit", absolutePath)) {
      continue;
    }

    const response = await promptFilePermission(
      "Edit",
      absolutePath,
      `Edit file: ${item.file_path}`,
    );
    if (!response.allowed) {
      denied.push(item.file_path);
    }
  }

  return { allowed: denied.length === 0, denied };
};
/**
 * Apply a single validated edit to the given content and write it to disk.
 *
 * @param edit        the find/replace pair plus target path
 * @param workingDir  base directory for resolving relative paths
 * @param fileContent current content of the file (from validation phase)
 * @returns per-edit result with a rendered diff on success
 *
 * FIX: the replacement now uses a replacer function so that `$` sequences
 * in `new_string` (e.g. `$&`, `$1`) are inserted literally. The previous
 * version passed the string directly to String.prototype.replace, which
 * expands those sequences as special replacement patterns and silently
 * corrupted such edits.
 */
const applyEdit = async (
  edit: EditItem,
  workingDir: string,
  fileContent: string,
): Promise<EditResult> => {
  const fullPath = path.isAbsolute(edit.file_path)
    ? edit.file_path
    : path.join(workingDir, edit.file_path);
  try {
    // Replacer function => new_string is inserted verbatim ($-safe).
    const newContent = fileContent.replace(edit.old_string, () => edit.new_string);
    const diff = generateDiff(fileContent, newContent);
    const relativePath = path.relative(workingDir, fullPath);
    const diffOutput = formatDiff(diff, relativePath);
    await fs.writeFile(fullPath, newContent, "utf-8");
    return {
      path: edit.file_path,
      success: true,
      diff: diffOutput,
      additions: diff.additions,
      deletions: diff.deletions,
    };
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    return {
      path: edit.file_path,
      success: false,
      error: message,
    };
  }
};
/**
 * Restore previously captured file contents.
 * Best effort: a failed write is ignored so that as many files as
 * possible are reverted. Writes run sequentially, preserving backup order.
 */
const rollback = async (backups: FileBackup[]): Promise<void> => {
  for (const { path: filePath, content } of backups) {
    try {
      await fs.writeFile(filePath, content, "utf-8");
    } catch {
      // Best effort rollback
    }
  }
};
/**
 * Execute multi-edit: validate every edit, check permissions, then apply
 * all edits, rolling back to the pre-run contents if any edit fails.
 *
 * @param args batch of edits (validated by multiEditParams)
 * @param ctx  tool context (working dir, progress callbacks, approvals)
 *
 * Fixes vs. previous version:
 * - the cached-content refresh uses a replacer function, so `$` sequences
 *   in new_string are kept literal instead of being expanded by
 *   String.prototype.replace special-pattern handling
 * - each file is backed up only ONCE (its pre-run content); previously a
 *   backup was pushed per edit, so rollback of a file edited twice ended
 *   by restoring an intermediate state rather than the original
 */
export const executeMultiEdit = async (
  args: MultiEditParams,
  ctx: ToolContext,
): Promise<ToolResult> => {
  const { edits } = args;

  // Guard against empty or oversized batches.
  if (edits.length === 0) {
    return createErrorResult(MULTI_EDIT_MESSAGES.NO_EDITS);
  }
  if (edits.length > MULTI_EDIT_DEFAULTS.MAX_EDITS) {
    return createErrorResult(
      MULTI_EDIT_MESSAGES.TOO_MANY_EDITS(MULTI_EDIT_DEFAULTS.MAX_EDITS),
    );
  }

  ctx.onMetadata?.({
    title: MULTI_EDIT_TITLES.VALIDATING(edits.length),
    status: "running",
  });

  // Phase 1: Validate all edits against the on-disk content.
  // NOTE: the map is keyed by file path, so a later edit to the same file
  // overwrites the earlier entry; the cached content is refreshed between
  // applications below so chained edits to one file stay consistent.
  const validations = new Map<string, { validation: EditValidation; edit: EditItem }>();
  const errors: string[] = [];
  for (const edit of edits) {
    const validation = await validateEdit(edit, ctx.workingDir);
    validations.set(edit.file_path, { validation, edit });
    if (!validation.valid) {
      errors.push(validation.error ?? "Unknown error");
    }
  }
  if (errors.length > 0) {
    return createErrorResult(
      MULTI_EDIT_MESSAGES.VALIDATION_FAILED + ":\n" + errors.join("\n"),
    );
  }

  // Phase 2: Check permissions for every target file.
  const permCheck = await checkPermissions(
    edits,
    ctx.workingDir,
    ctx.autoApprove ?? false,
  );
  if (!permCheck.allowed) {
    return createErrorResult(
      `Permission denied for: ${permCheck.denied.join(", ")}`,
    );
  }

  // Phase 3: Create backups and apply edits; stop at the first failure.
  const backups: FileBackup[] = [];
  const backedUp = new Set<string>();
  const results: EditResult[] = [];
  let failed = false;

  for (const [i, edit] of edits.entries()) {
    const data = validations.get(edit.file_path);
    if (!data?.validation.fileContent) continue;

    ctx.onMetadata?.({
      title: MULTI_EDIT_TITLES.APPLYING(i + 1, edits.length),
      status: "running",
    });

    const fullPath = path.isAbsolute(edit.file_path)
      ? edit.file_path
      : path.join(ctx.workingDir, edit.file_path);

    // Back up only the first time we touch a file: rollback must restore
    // the pre-run content, not an intermediate state.
    if (!backedUp.has(fullPath)) {
      backedUp.add(fullPath);
      backups.push({ path: fullPath, content: data.validation.fileContent });
    }

    const result = await applyEdit(edit, ctx.workingDir, data.validation.fileContent);
    results.push(result);
    if (!result.success) {
      failed = true;
      break;
    }

    // Refresh cached content so later edits to the same file see this one.
    // Replacer function keeps `$` sequences in new_string literal.
    const newContent = data.validation.fileContent.replace(
      edit.old_string,
      () => edit.new_string,
    );
    validations.set(edit.file_path, {
      ...data,
      validation: { ...data.validation, fileContent: newContent },
    });
  }

  // Phase 4: Roll back everything if any edit failed (atomicity guarantee).
  if (failed) {
    ctx.onMetadata?.({
      title: MULTI_EDIT_TITLES.ROLLBACK,
      status: "running",
    });
    await rollback(backups);
    return createErrorResult(MULTI_EDIT_MESSAGES.ATOMIC_FAILURE);
  }

  return createSuccessResult(results, edits.length);
};
// Tool registration: exposes the atomic batch-edit capability to the agent
// under the name "multi_edit"; parameters are validated by multiEditParams.
export const multiEditTool: ToolDefinition<typeof multiEditParams> = {
  name: "multi_edit",
  description: MULTI_EDIT_DESCRIPTION,
  parameters: multiEditParams,
  execute: executeMultiEdit,
};

View File

@@ -0,0 +1,13 @@
/**
* MultiEdit Tool
*
* Batch file editing with atomic transactions
*/
export { multiEditTool, executeMultiEdit } from "@tools/multi-edit/execute";
export {
multiEditParams,
editItemSchema,
type EditItem,
type MultiEditParams,
} from "@tools/multi-edit/params";

View File

@@ -0,0 +1,21 @@
/**
* MultiEdit Tool Parameters
*/
import { z } from "zod";
// Schema for a single find-and-replace edit.
// NOTE(review): the description says "Absolute path", but the execution
// code also resolves relative paths against the working directory —
// confirm whether the wording or the behavior should change.
export const editItemSchema = z.object({
  file_path: z.string().describe("Absolute path to the file to edit"),
  old_string: z.string().describe("The exact text to find and replace"),
  new_string: z.string().describe("The replacement text"),
});

// Top-level parameters: one or more edits applied as an atomic batch
// (all succeed or all are rolled back).
export const multiEditParams = z.object({
  edits: z
    .array(editItemSchema)
    .min(1)
    .describe("Array of edits to apply atomically"),
});

export type EditItem = z.infer<typeof editItemSchema>;
export type MultiEditParams = z.infer<typeof multiEditParams>;

View File

@@ -0,0 +1,346 @@
/**
* WebFetch Tool Execution
*
* Fetches content from URLs and converts HTML to markdown
*/
import {
WEB_FETCH_DEFAULTS,
WEB_FETCH_MESSAGES,
WEB_FETCH_TITLES,
WEB_FETCH_DESCRIPTION,
HTML_REMOVE_ELEMENTS,
} from "@constants/web-fetch";
import { webFetchParams } from "@tools/web-fetch/params";
import type { ToolDefinition, ToolContext, ToolResult } from "@/types/tools";
import type { WebFetchParams } from "@tools/web-fetch/params";
/** Build a failed ToolResult with the given error message. */
const createErrorResult = (error: string): ToolResult => {
  return {
    success: false,
    title: WEB_FETCH_TITLES.FAILED,
    output: "",
    error,
  };
};

/** Build a successful ToolResult carrying fetched content plus metadata. */
const createSuccessResult = (
  url: string,
  content: string,
  contentType: string,
): ToolResult => {
  return {
    success: true,
    title: WEB_FETCH_TITLES.SUCCESS,
    output: content,
    metadata: {
      url,
      contentType,
      contentLength: content.length,
    },
  };
};
/**
 * Parse and normalize a URL string.
 * Plain-HTTP URLs are upgraded to HTTPS; non-web schemes are rejected.
 * Returns the parsed URL, or null when the input is not a usable web URL.
 */
const validateUrl = (url: string): URL | null => {
  let parsed: URL;
  try {
    parsed = new URL(url);
  } catch {
    return null;
  }
  // Upgrade HTTP to HTTPS before the scheme check.
  if (parsed.protocol === "http:") {
    parsed.protocol = "https:";
  }
  const isWebScheme = parsed.protocol === "https:" || parsed.protocol === "http:";
  return isWebScheme ? parsed : null;
};
/**
 * Strip the given tags (and their contents) from an HTML string.
 * Handles both self-closing (`<tag ... />`) and paired open/close forms.
 */
const removeElements = (html: string, tags: string[]): string => {
  return tags.reduce((stripped, tag) => {
    const selfClosing = new RegExp(`<${tag}[^>]*/>`, "gi");
    const paired = new RegExp(`<${tag}[^>]*>[\\s\\S]*?</${tag}>`, "gi");
    return stripped.replace(selfClosing, "").replace(paired, "");
  }, html);
};
/**
 * Decode common named and numeric HTML entities.
 *
 * FIXES vs. previous version:
 * - `&amp;` is decoded LAST, so double-escaped input such as `&amp;lt;`
 *   yields the literal text `&lt;` instead of being decoded twice into
 *   `<` (the old loop hit the `&amp;` entry first);
 * - numeric entities use String.fromCodePoint so astral-plane references
 *   (e.g. emoji) decode to the correct character instead of garbage
 *   surrogate halves;
 * - `&ndash;` now maps to the en dash (its mapping was an empty string).
 */
const decodeHtmlEntities = (text: string): string => {
  // Named entities, deliberately excluding &amp; (handled last).
  const entities: Record<string, string> = {
    "&lt;": "<",
    "&gt;": ">",
    "&quot;": '"',
    "&#39;": "'",
    "&nbsp;": " ",
    "&#x27;": "'",
    "&#x2F;": "/",
    "&mdash;": "—",
    "&ndash;": "–",
    "&hellip;": "…",
    "&rsquo;": "'",
    "&lsquo;": "'",
    "&rdquo;": '"',
    "&ldquo;": '"',
    "&copy;": "©",
    "&reg;": "®",
    "&trade;": "™",
  };
  let decoded = text;
  // split/join performs literal (non-regex) replacement of each entity.
  for (const [entity, char] of Object.entries(entities)) {
    decoded = decoded.split(entity).join(char);
  }
  // Numeric entities: decimal then hexadecimal.
  decoded = decoded.replace(/&#(\d+);/g, (_, code) =>
    String.fromCodePoint(parseInt(code, 10)),
  );
  decoded = decoded.replace(/&#x([0-9a-fA-F]+);/g, (_, code) =>
    String.fromCodePoint(parseInt(code, 16)),
  );
  // Ampersand last, so the replacements above cannot double-decode.
  return decoded.split("&amp;").join("&");
};
/**
 * Convert an HTML document to readable markdown via a regex pipeline.
 *
 * NOTE: replacement order matters — structural elements (headers, links,
 * emphasis, code, lists, blockquotes) are converted first, then a
 * catch-all strips remaining tags, and entity decoding runs last so that
 * decoded characters (e.g. `<`) are not mistaken for tags.
 * Regex-based parsing is approximate; malformed or deeply nested HTML may
 * convert imperfectly.
 */
const htmlToMarkdown = (html: string): string => {
  // Remove unwanted elements (per HTML_REMOVE_ELEMENTS — presumably
  // script/style and similar; confirm against @constants/web-fetch)
  let content = removeElements(html, HTML_REMOVE_ELEMENTS);
  // Extract body content if present
  const bodyMatch = content.match(/<body[^>]*>([\s\S]*)<\/body>/i);
  if (bodyMatch) {
    content = bodyMatch[1];
  }
  // Convert headers h1..h6 to #-prefixed lines
  content = content.replace(/<h1[^>]*>([\s\S]*?)<\/h1>/gi, "\n# $1\n");
  content = content.replace(/<h2[^>]*>([\s\S]*?)<\/h2>/gi, "\n## $1\n");
  content = content.replace(/<h3[^>]*>([\s\S]*?)<\/h3>/gi, "\n### $1\n");
  content = content.replace(/<h4[^>]*>([\s\S]*?)<\/h4>/gi, "\n#### $1\n");
  content = content.replace(/<h5[^>]*>([\s\S]*?)<\/h5>/gi, "\n##### $1\n");
  content = content.replace(/<h6[^>]*>([\s\S]*?)<\/h6>/gi, "\n###### $1\n");
  // Convert links
  content = content.replace(
    /<a[^>]*href="([^"]*)"[^>]*>([\s\S]*?)<\/a>/gi,
    "[$2]($1)",
  );
  // Convert images (alt-text form first, then the alt-less fallback)
  content = content.replace(
    /<img[^>]*src="([^"]*)"[^>]*alt="([^"]*)"[^>]*\/?>/gi,
    "![$2]($1)",
  );
  content = content.replace(/<img[^>]*src="([^"]*)"[^>]*\/?>/gi, "![]($1)");
  // Convert emphasis
  content = content.replace(/<strong[^>]*>([\s\S]*?)<\/strong>/gi, "**$1**");
  content = content.replace(/<b[^>]*>([\s\S]*?)<\/b>/gi, "**$1**");
  content = content.replace(/<em[^>]*>([\s\S]*?)<\/em>/gi, "*$1*");
  content = content.replace(/<i[^>]*>([\s\S]*?)<\/i>/gi, "*$1*");
  // Convert code (inline before fenced blocks)
  content = content.replace(/<code[^>]*>([\s\S]*?)<\/code>/gi, "`$1`");
  content = content.replace(
    /<pre[^>]*>([\s\S]*?)<\/pre>/gi,
    "\n```\n$1\n```\n",
  );
  // Convert lists (items become "- "; ul/ol wrappers become blank lines)
  content = content.replace(/<li[^>]*>([\s\S]*?)<\/li>/gi, "- $1\n");
  content = content.replace(/<\/?[ou]l[^>]*>/gi, "\n");
  // Convert paragraphs and line breaks
  content = content.replace(/<p[^>]*>([\s\S]*?)<\/p>/gi, "\n$1\n");
  content = content.replace(/<br\s*\/?>/gi, "\n");
  content = content.replace(/<hr\s*\/?>/gi, "\n---\n");
  // Convert blockquotes: prefix every inner line with "> "
  content = content.replace(
    /<blockquote[^>]*>([\s\S]*?)<\/blockquote>/gi,
    (_, text) => {
      return text
        .split("\n")
        .map((line: string) => `> ${line}`)
        .join("\n");
    },
  );
  // Remove remaining HTML tags
  content = content.replace(/<[^>]+>/g, "");
  // Decode HTML entities (after tag removal, so "&lt;" cannot become a tag)
  content = decodeHtmlEntities(content);
  // Clean up whitespace: collapse blank-line runs and repeated spaces/tabs
  content = content.replace(/\n{3,}/g, "\n\n");
  content = content.replace(/[ \t]+/g, " ");
  content = content.trim();
  return content;
};
/**
 * Pretty-print a JSON string inside a fenced ```json block.
 * Invalid JSON is returned unchanged.
 */
const formatJson = (json: string): string => {
  try {
    const pretty = JSON.stringify(JSON.parse(json), null, 2);
    return "```json\n" + pretty + "\n```";
  } catch {
    return json;
  }
};
/**
 * Route the raw response body through the converter matching its
 * Content-Type: JSON is pretty-printed, HTML becomes markdown, and
 * everything else (plain text, markdown, ...) passes through untouched.
 */
const processContent = (content: string, contentType: string): string => {
  const kind = contentType.toLowerCase();
  if (kind.includes("json")) {
    return formatJson(content);
  }
  if (kind.includes("html") || kind.includes("xhtml")) {
    return htmlToMarkdown(content);
  }
  return content;
};
/**
 * Cap content at maxLength characters, preferring to cut at a newline
 * when one falls within the last 20% of the limit, and append a
 * truncation notice stating the retained size.
 */
const truncateContent = (content: string, maxLength: number): string => {
  if (content.length <= maxLength) {
    return content;
  }

  const head = content.slice(0, maxLength);
  const newlineAt = head.lastIndexOf("\n");
  const cut = newlineAt > maxLength * 0.8 ? newlineAt : maxLength;
  const sizeKb = Math.round(maxLength / 1000);

  return `${head.slice(0, cut)}\n\n... (content truncated, showing first ${sizeKb}KB)`;
};
/**
 * Execute web fetch: download a URL (HTTP upgraded to HTTPS), convert the
 * body to markdown / formatted text, and return it as a ToolResult.
 * Cross-host redirects are reported to the caller instead of returning
 * the redirected content silently.
 *
 * Fixes vs. previous version:
 * - the timeout timer is always cleared (it previously leaked on every
 *   error path and on non-OK responses);
 * - a timeout is now reported as TIMEOUT — previously the catch block
 *   only inspected the caller's abort signal, so real timeouts (which
 *   abort the local controller) surfaced as generic fetch errors;
 * - the listener bridging the caller's abort signal is removed when the
 *   call completes, instead of accumulating on the shared signal.
 */
export const executeWebFetch = async (
  args: WebFetchParams,
  ctx: ToolContext,
): Promise<ToolResult> => {
  const { url, timeout = WEB_FETCH_DEFAULTS.TIMEOUT_MS } = args;

  if (!url || url.trim().length === 0) {
    return createErrorResult(WEB_FETCH_MESSAGES.URL_REQUIRED);
  }

  const parsedUrl = validateUrl(url);
  if (!parsedUrl) {
    return createErrorResult(WEB_FETCH_MESSAGES.INVALID_URL(url));
  }

  ctx.onMetadata?.({
    title: WEB_FETCH_TITLES.FETCHING(parsedUrl.hostname),
    status: "running",
  });

  // Local controller merges the timeout with the caller's abort signal.
  const controller = new AbortController();
  let timedOut = false;
  const timeoutId = setTimeout(() => {
    timedOut = true;
    controller.abort();
  }, timeout);
  const onOuterAbort = () => controller.abort();
  ctx.abort.signal.addEventListener("abort", onOuterAbort, { once: true });

  try {
    const response = await fetch(parsedUrl.toString(), {
      headers: {
        "User-Agent": WEB_FETCH_DEFAULTS.USER_AGENT,
        Accept:
          "text/html,application/xhtml+xml,application/xml;q=0.9,application/json,text/plain;q=0.8",
        "Accept-Language": "en-US,en;q=0.9",
      },
      signal: controller.signal,
      redirect: "follow",
    });

    if (!response.ok) {
      return createErrorResult(
        WEB_FETCH_MESSAGES.FETCH_ERROR(`HTTP ${response.status}`),
      );
    }

    // A redirect to a different host is reported rather than consumed.
    const finalUrl = new URL(response.url);
    if (finalUrl.host !== parsedUrl.host) {
      return {
        success: true,
        title: WEB_FETCH_TITLES.SUCCESS,
        output: WEB_FETCH_MESSAGES.REDIRECT_DETECTED(
          parsedUrl.host,
          finalUrl.host,
        ),
        metadata: {
          redirectUrl: response.url,
          originalUrl: url,
        },
      };
    }

    const contentType = response.headers.get("content-type") || "text/plain";
    let content = await response.text();

    // Keep at most MAX_CONTENT_LENGTH characters before conversion.
    if (content.length > WEB_FETCH_DEFAULTS.MAX_CONTENT_LENGTH) {
      content = truncateContent(
        content,
        WEB_FETCH_DEFAULTS.MAX_CONTENT_LENGTH,
      );
    }

    const processed = processContent(content, contentType);
    return createSuccessResult(response.url, processed, contentType);
  } catch (error) {
    if (timedOut || ctx.abort.signal.aborted) {
      return createErrorResult(WEB_FETCH_MESSAGES.TIMEOUT);
    }
    const message = error instanceof Error ? error.message : String(error);
    return createErrorResult(WEB_FETCH_MESSAGES.FETCH_ERROR(message));
  } finally {
    clearTimeout(timeoutId);
    ctx.abort.signal.removeEventListener("abort", onOuterAbort);
  }
};
// Tool registration: exposes URL fetching to the agent under the name
// "web_fetch"; parameters are validated by webFetchParams.
export const webFetchTool: ToolDefinition<typeof webFetchParams> = {
  name: "web_fetch",
  description: WEB_FETCH_DESCRIPTION,
  parameters: webFetchParams,
  execute: executeWebFetch,
};

View File

@@ -0,0 +1,8 @@
/**
* WebFetch Tool
*
* Fetch and convert web content to markdown
*/
export { webFetchTool, executeWebFetch } from "@tools/web-fetch/execute";
export { webFetchParams, type WebFetchParams } from "@tools/web-fetch/params";

View File

@@ -0,0 +1,19 @@
/**
* WebFetch Tool Parameters
*/
import { z } from "zod";
export const webFetchParams = z.object({
url: z.string().describe("The URL to fetch content from"),
prompt: z
.string()
.optional()
.describe("Optional prompt to extract specific information from the content"),
timeout: z
.number()
.optional()
.describe("Timeout in milliseconds (default: 30000)"),
});
export type WebFetchParams = z.infer<typeof webFetchParams>;

View File

@@ -1,7 +1,7 @@
/**
* Web Search Tool Execution
*
* Uses DuckDuckGo HTML search (no API key required)
* Uses Bing RSS search (no API key required, no captcha)
*/
import {
@@ -55,69 +55,6 @@ const createSuccessResult = (
};
};
/**
* Parse DuckDuckGo HTML search results
*/
const parseSearchResults = (html: string, maxResults: number): SearchResult[] => {
const results: SearchResult[] = [];
// DuckDuckGo lite HTML structure parsing
// Look for result links and snippets
const resultPattern =
/<a[^>]+class="result-link"[^>]*href="([^"]+)"[^>]*>([^<]+)<\/a>[\s\S]*?<td[^>]*class="result-snippet"[^>]*>([^<]+)/gi;
// Alternative pattern for standard DuckDuckGo HTML
const altPattern =
/<a[^>]+rel="nofollow"[^>]*href="([^"]+)"[^>]*>([^<]+)<\/a>[\s\S]*?<span[^>]*>([^<]{20,})/gi;
// Try result-link pattern first
let match: RegExpExecArray | null;
while ((match = resultPattern.exec(html)) !== null && results.length < maxResults) {
const [, url, title, snippet] = match;
if (url && title && !url.includes("duckduckgo.com")) {
results.push({
title: decodeHtmlEntities(title.trim()),
url: decodeUrl(url),
snippet: decodeHtmlEntities(snippet.trim()),
});
}
}
// If no results, try alternative pattern
if (results.length === 0) {
while ((match = altPattern.exec(html)) !== null && results.length < maxResults) {
const [, url, title, snippet] = match;
if (url && title && !url.includes("duckduckgo.com")) {
results.push({
title: decodeHtmlEntities(title.trim()),
url: decodeUrl(url),
snippet: decodeHtmlEntities(snippet.trim()),
});
}
}
}
// Fallback: extract any external links with reasonable text
if (results.length === 0) {
const linkPattern = /<a[^>]+href="(https?:\/\/(?!duckduckgo)[^"]+)"[^>]*>([^<]{10,100})<\/a>/gi;
const seenUrls = new Set<string>();
while ((match = linkPattern.exec(html)) !== null && results.length < maxResults) {
const [, url, title] = match;
if (!seenUrls.has(url) && !url.includes("duckduckgo")) {
seenUrls.add(url);
results.push({
title: decodeHtmlEntities(title.trim()),
url: decodeUrl(url),
snippet: "",
});
}
}
}
return results;
};
/**
* Decode HTML entities
*/
@@ -147,21 +84,36 @@ const decodeHtmlEntities = (text: string): string => {
};
/**
* Decode DuckDuckGo redirect URLs
* Parse Bing RSS search results
*/
const decodeUrl = (url: string): string => {
// DuckDuckGo often wraps URLs in redirects
if (url.includes("uddg=")) {
const match = url.match(/uddg=([^&]+)/);
if (match) {
return decodeURIComponent(match[1]);
const parseRssResults = (rss: string, maxResults: number): SearchResult[] => {
const results: SearchResult[] = [];
// Parse RSS items
const itemPattern = /<item>([\s\S]*?)<\/item>/gi;
let match: RegExpExecArray | null;
while ((match = itemPattern.exec(rss)) !== null && results.length < maxResults) {
const itemContent = match[1];
const titleMatch = itemContent.match(/<title>([^<]+)<\/title>/);
const linkMatch = itemContent.match(/<link>([^<]+)<\/link>/);
const descMatch = itemContent.match(/<description>([^<]*)<\/description>/);
if (titleMatch && linkMatch) {
results.push({
title: decodeHtmlEntities(titleMatch[1].trim()),
url: linkMatch[1].trim(),
snippet: descMatch ? decodeHtmlEntities(descMatch[1].trim()) : "",
});
}
}
return url;
return results;
};
/**
* Perform web search using DuckDuckGo
* Perform web search using Bing RSS
*/
const performSearch = async (
query: string,
@@ -170,13 +122,13 @@ const performSearch = async (
): Promise<SearchResult[]> => {
const encodedQuery = encodeURIComponent(query);
// Use DuckDuckGo HTML search (lite version for easier parsing)
const searchUrl = `https://lite.duckduckgo.com/lite/?q=${encodedQuery}`;
// Use Bing RSS search (no captcha, no API key required)
const searchUrl = `https://www.bing.com/search?q=${encodedQuery}&format=rss`;
const response = await fetch(searchUrl, {
headers: {
"User-Agent": WEB_SEARCH_DEFAULTS.USER_AGENT,
Accept: "text/html",
Accept: "application/rss+xml, text/xml",
"Accept-Language": "en-US,en;q=0.9",
},
signal,
@@ -186,8 +138,8 @@ const performSearch = async (
throw new Error(`Search request failed: ${response.status}`);
}
const html = await response.text();
return parseSearchResults(html, maxResults);
const rss = await response.text();
return parseRssResults(rss, maxResults);
};
/**

View File

@@ -62,6 +62,9 @@ interface AppProps extends TuiInput {
scope?: LearningScope,
editedContent?: string,
) => void;
onBrainSetJwtToken?: (jwtToken: string) => Promise<void>;
onBrainSetApiKey?: (apiKey: string) => Promise<void>;
onBrainLogout?: () => Promise<void>;
plan?: {
id: string;
title: string;
@@ -139,9 +142,14 @@ function AppContent(props: AppProps) {
app.setCascadeEnabled(props.cascadeEnabled);
}
// Navigate to session if resuming
if (props.sessionId) {
route.goToSession(props.sessionId);
// Always navigate to session view (skip home page)
// Use existing sessionId or create a new one
if (!route.isSession()) {
const sessionId = props.sessionId ?? `session-${Date.now()}`;
batch(() => {
app.setSessionInfo(sessionId, app.provider(), app.model());
route.goToSession(sessionId);
});
}
if (props.availableModels && props.availableModels.length > 0) {
@@ -375,6 +383,9 @@ function AppContent(props: AppProps) {
onCascadeToggle={handleCascadeToggle}
onPermissionResponse={handlePermissionResponse}
onLearningResponse={handleLearningResponse}
onBrainSetJwtToken={props.onBrainSetJwtToken}
onBrainSetApiKey={props.onBrainSetApiKey}
onBrainLogout={props.onBrainLogout}
plan={props.plan}
agents={props.agents}
currentAgent={props.currentAgent}
@@ -429,6 +440,9 @@ export interface TuiRenderOptions extends TuiInput {
scope?: LearningScope,
editedContent?: string,
) => void;
onBrainSetJwtToken?: (jwtToken: string) => Promise<void>;
onBrainSetApiKey?: (apiKey: string) => Promise<void>;
onBrainLogout?: () => Promise<void>;
plan?: {
id: string;
title: string;

View File

@@ -0,0 +1,411 @@
import { createSignal, Show, For } from "solid-js";
import { useKeyboard } from "@opentui/solid";
import { TextAttributes } from "@opentui/core";
import { useTheme } from "@tui-solid/context/theme";
import { useAppStore } from "@tui-solid/context/app";
import { BRAIN_BANNER } from "@constants/brain";
interface BrainMenuProps {
onSetJwtToken: (jwtToken: string) => Promise<void>;
onSetApiKey: (apiKey: string) => Promise<void>;
onLogout: () => Promise<void>;
onClose: () => void;
isActive?: boolean;
}
type MenuView = "main" | "login_url" | "jwt_input" | "apikey";
interface MenuItem {
id: string;
label: string;
description: string;
action: () => void;
disabled?: boolean;
}
export function BrainMenu(props: BrainMenuProps) {
const theme = useTheme();
const app = useAppStore();
const isActive = () => props.isActive ?? true;
const [view, setView] = createSignal<MenuView>("main");
const [selectedIndex, setSelectedIndex] = createSignal(0);
const [jwtToken, setJwtToken] = createSignal("");
const [apiKey, setApiKey] = createSignal("");
const [error, setError] = createSignal<string | null>(null);
const [loading, setLoading] = createSignal(false);
const isConnected = () => app.brain().status === "connected";
const menuItems = (): MenuItem[] => {
const items: MenuItem[] = [];
if (!isConnected()) {
items.push({
id: "login",
label: "Login with Email",
description: "Get JWT token from the web portal",
action: () => {
setView("login_url");
setSelectedIndex(0);
},
});
items.push({
id: "apikey",
label: "Use API Key",
description: "Enter your API key directly",
action: () => {
setView("apikey");
setSelectedIndex(0);
},
});
} else {
items.push({
id: "logout",
label: "Logout",
description: "Disconnect from CodeTyper Brain",
action: async () => {
setLoading(true);
try {
await props.onLogout();
} catch (err) {
setError(err instanceof Error ? err.message : "Logout failed");
} finally {
setLoading(false);
}
},
});
}
items.push({
id: "close",
label: "Close",
description: "Return to session",
action: () => props.onClose(),
});
return items;
};
const handleJwtSubmit = async (): Promise<void> => {
if (!jwtToken()) {
setError("JWT token is required");
return;
}
setLoading(true);
setError(null);
try {
await props.onSetJwtToken(jwtToken());
setView("main");
setJwtToken("");
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to set JWT token");
} finally {
setLoading(false);
}
};
const handleApiKey = async (): Promise<void> => {
if (!apiKey()) {
setError("API key is required");
return;
}
setLoading(true);
setError(null);
try {
await props.onSetApiKey(apiKey());
setView("main");
setApiKey("");
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to set API key");
} finally {
setLoading(false);
}
};
useKeyboard((evt) => {
if (!isActive()) return;
if (evt.name === "escape") {
if (view() !== "main") {
setView("main");
setError(null);
setSelectedIndex(0);
} else {
props.onClose();
}
evt.preventDefault();
evt.stopPropagation();
return;
}
// Main menu navigation
if (view() === "main") {
if (evt.name === "up") {
setSelectedIndex((prev) => (prev > 0 ? prev - 1 : menuItems().length - 1));
evt.preventDefault();
return;
}
if (evt.name === "down") {
setSelectedIndex((prev) => (prev < menuItems().length - 1 ? prev + 1 : 0));
evt.preventDefault();
return;
}
if (evt.name === "return") {
const item = menuItems()[selectedIndex()];
if (item && !item.disabled) {
item.action();
}
evt.preventDefault();
return;
}
}
// Login URL view - press Enter to go to JWT input
if (view() === "login_url") {
if (evt.name === "return") {
setView("jwt_input");
evt.preventDefault();
return;
}
}
// JWT token input handling
if (view() === "jwt_input") {
if (evt.name === "return") {
handleJwtSubmit();
evt.preventDefault();
return;
}
if (evt.name === "backspace") {
setJwtToken((prev) => prev.slice(0, -1));
evt.preventDefault();
return;
}
if (evt.name.length === 1 && !evt.ctrl && !evt.meta) {
setJwtToken((prev) => prev + evt.name);
evt.preventDefault();
return;
}
}
// API key form handling
if (view() === "apikey") {
if (evt.name === "return") {
handleApiKey();
evt.preventDefault();
return;
}
if (evt.name === "backspace") {
setApiKey((prev) => prev.slice(0, -1));
evt.preventDefault();
return;
}
if (evt.name.length === 1 && !evt.ctrl && !evt.meta) {
setApiKey((prev) => prev + evt.name);
evt.preventDefault();
return;
}
}
});
const getStatusColor = (): string => {
const status = app.brain().status;
const colorMap: Record<string, string> = {
connected: theme.colors.success,
connecting: theme.colors.warning,
disconnected: theme.colors.textDim,
error: theme.colors.error,
};
return colorMap[status] ?? theme.colors.textDim;
};
const getStatusText = (): string => {
const status = app.brain().status;
const textMap: Record<string, string> = {
connected: "Connected",
connecting: "Connecting...",
disconnected: "Not connected",
error: "Connection error",
};
return textMap[status] ?? "Unknown";
};
return (
<box
flexDirection="column"
borderColor={theme.colors.accent}
border={["top", "bottom", "left", "right"]}
backgroundColor={theme.colors.background}
paddingLeft={1}
paddingRight={1}
width={60}
>
{/* Header */}
<box marginBottom={1} flexDirection="row" gap={1}>
<text fg="#ff69b4" attributes={TextAttributes.BOLD}>
{BRAIN_BANNER.EMOJI_CONNECTED}
</text>
<text fg={theme.colors.accent} attributes={TextAttributes.BOLD}>
CodeTyper Brain
</text>
</box>
{/* Status */}
<box marginBottom={1} flexDirection="row">
<text fg={theme.colors.textDim}>Status: </text>
<text fg={getStatusColor()}>{getStatusText()}</text>
<Show when={isConnected()}>
<text fg={theme.colors.textDim}>
{" "}({app.brain().knowledgeCount}K / {app.brain().memoryCount}M)
</text>
</Show>
</box>
<Show when={isConnected() && app.brain().user}>
<box marginBottom={1} flexDirection="row">
<text fg={theme.colors.textDim}>User: </text>
<text fg={theme.colors.info}>
{app.brain().user?.display_name ?? app.brain().user?.email}
</text>
</box>
</Show>
{/* Error message */}
<Show when={error()}>
<box marginBottom={1}>
<text fg={theme.colors.error}>{error()}</text>
</box>
</Show>
{/* Main menu view */}
<Show when={view() === "main"}>
<box flexDirection="column">
<For each={menuItems()}>
{(item, index) => {
const isSelected = () => index() === selectedIndex();
return (
<box flexDirection="column" marginBottom={1}>
<box flexDirection="row">
<text
fg={isSelected() ? theme.colors.accent : undefined}
attributes={isSelected() ? TextAttributes.BOLD : TextAttributes.NONE}
>
{isSelected() ? "> " : " "}
</text>
<text
fg={isSelected() ? theme.colors.accent : undefined}
attributes={isSelected() ? TextAttributes.BOLD : TextAttributes.NONE}
>
{item.label}
</text>
</box>
<box marginLeft={4}>
<text fg={theme.colors.textDim}>{item.description}</text>
</box>
</box>
);
}}
</For>
</box>
<box marginTop={1} flexDirection="column">
<text fg={theme.colors.info}>{BRAIN_BANNER.CTA}: {BRAIN_BANNER.URL}</text>
<text fg={theme.colors.textDim}>
Arrow keys navigate | Enter select | Esc close
</text>
</box>
</Show>
{/* Login URL view - shows where to login */}
<Show when={view() === "login_url"}>
<box flexDirection="column">
<box marginBottom={1}>
<text fg={theme.colors.text}>1. Go to this page to login:</text>
</box>
<box marginBottom={1}>
<text fg={theme.colors.accent} attributes={TextAttributes.BOLD}>
{BRAIN_BANNER.LOGIN_URL}
</text>
</box>
<box marginBottom={1}>
<text fg={theme.colors.text}>2. After logging in, copy your JWT token</text>
</box>
<box marginBottom={1}>
<text fg={theme.colors.text}>3. Press Enter to input your token</text>
</box>
</box>
<box marginTop={1}>
<text fg={theme.colors.textDim}>
Enter continue | Esc back
</text>
</box>
</Show>
{/* JWT token input view */}
<Show when={view() === "jwt_input"}>
<box flexDirection="column">
<box marginBottom={1} flexDirection="column">
<text fg={theme.colors.accent}>JWT Token:</text>
<box
borderColor={theme.colors.accent}
border={["top", "bottom", "left", "right"]}
paddingLeft={1}
paddingRight={1}
>
<text fg={theme.colors.text}>
{jwtToken() ? "*".repeat(Math.min(jwtToken().length, 40)) : " "}
</text>
</box>
</box>
<Show when={loading()}>
<text fg={theme.colors.warning}>Saving token...</text>
</Show>
</box>
<box marginTop={1}>
<text fg={theme.colors.textDim}>Enter save | Esc back</text>
</box>
</Show>
{/* API key form view */}
<Show when={view() === "apikey"}>
<box flexDirection="column">
<box marginBottom={1} flexDirection="column">
<text fg={theme.colors.accent}>API Key:</text>
<box
borderColor={theme.colors.accent}
border={["top", "bottom", "left", "right"]}
paddingLeft={1}
paddingRight={1}
>
<text fg={theme.colors.text}>
{apiKey() ? "*".repeat(Math.min(apiKey().length, 40)) : " "}
</text>
</box>
</box>
<Show when={loading()}>
<text fg={theme.colors.warning}>Setting API key...</text>
</Show>
</box>
<box marginTop={1}>
<text fg={theme.colors.textDim}>Enter save | Esc back</text>
</box>
</Show>
</box>
);
}

View File

@@ -3,6 +3,7 @@ import { useKeyboard } from "@opentui/solid";
import { TextAttributes } from "@opentui/core";
import { useTheme } from "@tui-solid/context/theme";
import { useAppStore } from "@tui-solid/context/app";
import { BRAIN_DISABLED } from "@constants/brain";
import type { SlashCommand, CommandCategory } from "@/types/tui";
import { SLASH_COMMANDS, COMMAND_CATEGORIES } from "@constants/tui-components";
@@ -22,9 +23,14 @@ const filterCommands = (
commands: readonly SlashCommand[],
filter: string,
): SlashCommand[] => {
if (!filter) return [...commands];
// Filter out brain command when Brain is disabled
let availableCommands = BRAIN_DISABLED
? commands.filter((cmd) => cmd.name !== "brain")
: [...commands];
if (!filter) return availableCommands;
const query = filter.toLowerCase();
return commands.filter(
return availableCommands.filter(
(cmd) =>
cmd.name.toLowerCase().includes(query) ||
cmd.description.toLowerCase().includes(query),

View File

@@ -156,11 +156,11 @@ export function DebugLogPanel() {
paddingRight={1}
borderColor={theme.colors.border}
border={["bottom"]}
flexDirection="row"
>
<text fg={theme.colors.accent} attributes={TextAttributes.BOLD}>
Debug Logs
Debug Logs ({entries().length})
</text>
<text fg={theme.colors.textDim}> ({entries().length})</text>
</box>
<scrollbox

View File

@@ -2,6 +2,11 @@ import { Show, createMemo } from "solid-js";
import { TextAttributes } from "@opentui/core";
import { useTheme } from "@tui-solid/context/theme";
import { useAppStore } from "@tui-solid/context/app";
import { BRAIN_BANNER, BRAIN_DISABLED } from "@constants/brain";
import {
TOKEN_WARNING_THRESHOLD,
TOKEN_CRITICAL_THRESHOLD,
} from "@constants/token";
interface HeaderProps {
showBanner?: boolean;
@@ -25,6 +30,30 @@ const MODE_COLORS = {
"code-review": "success",
} as const;
const BRAIN_STATUS_COLORS = {
connected: "success",
connecting: "warning",
disconnected: "textDim",
error: "error",
} as const;
const TOKEN_STATUS_COLORS = {
normal: "textDim",
warning: "warning",
critical: "error",
compacting: "info",
} as const;
/**
* Format token count for display (e.g., 45.2K)
*/
const formatTokenCount = (tokens: number): string => {
if (tokens >= 1000) {
return `${(tokens / 1000).toFixed(1)}K`;
}
return tokens.toString();
};
export function Header(props: HeaderProps) {
const theme = useTheme();
const app = useAppStore();
@@ -35,57 +64,162 @@ export function Header(props: HeaderProps) {
return theme.colors[colorKey];
});
return (
<box
flexDirection="row"
justifyContent="space-between"
paddingLeft={1}
paddingRight={1}
borderColor={theme.colors.border}
border={["bottom"]}
>
<box flexDirection="row" gap={1}>
<Show when={showBanner()}>
<text fg={theme.colors.primary} attributes={TextAttributes.BOLD}>
CodeTyper
</text>
</Show>
<text fg={theme.colors.textDim}>v{app.version()}</text>
<text fg={theme.colors.textDim}>|</text>
<box flexDirection="row">
<text fg={modeColor()} attributes={TextAttributes.BOLD}>
[{MODE_LABELS[app.interactionMode()]}]
</text>
<Show when={app.currentAgent() !== "default"}>
<text fg={theme.colors.secondary} attributes={TextAttributes.BOLD}>
{" "}
@{app.currentAgent()}
</text>
</Show>
<text fg={theme.colors.textDim}>
{" "}
- {MODE_DESCRIPTIONS[app.interactionMode()]}
</text>
</box>
</box>
const brainColor = createMemo(() => {
const brain = app.brain();
const colorKey = BRAIN_STATUS_COLORS[brain.status];
return theme.colors[colorKey];
});
<box flexDirection="row" gap={2}>
<box flexDirection="row">
<text fg={theme.colors.textDim}>Provider: </text>
<text fg={theme.colors.secondary}>{app.provider()}</text>
</box>
<box flexDirection="row">
<text fg={theme.colors.textDim}>Model: </text>
<text fg={theme.colors.accent}>{app.model() || "auto"}</text>
</box>
<Show when={app.sessionId()}>
<box flexDirection="row">
<text fg={theme.colors.textDim}>Session: </text>
<text fg={theme.colors.info}>
{app.sessionId()?.replace("session-", "").slice(-5)}
const shouldShowBrainBanner = createMemo(() => {
if (BRAIN_DISABLED) return false;
const brain = app.brain();
return brain.showBanner && brain.status === "disconnected";
});
// Context window usage calculation
const contextUsage = createMemo(() => {
const stats = app.sessionStats();
const totalTokens = stats.inputTokens + stats.outputTokens;
const maxTokens = stats.contextMaxTokens;
const usagePercent = maxTokens > 0 ? (totalTokens / maxTokens) * 100 : 0;
let status: "normal" | "warning" | "critical" | "compacting" = "normal";
if (app.isCompacting()) {
status = "compacting";
} else if (usagePercent >= TOKEN_CRITICAL_THRESHOLD * 100) {
status = "critical";
} else if (usagePercent >= TOKEN_WARNING_THRESHOLD * 100) {
status = "warning";
}
return {
current: totalTokens,
max: maxTokens,
percent: usagePercent,
status,
};
});
const tokenColor = createMemo(() => {
const colorKey = TOKEN_STATUS_COLORS[contextUsage().status];
return theme.colors[colorKey];
});
return (
<box flexDirection="column">
{/* Brain Banner - shown when not connected */}
<Show when={shouldShowBrainBanner()}>
<box
flexDirection="row"
justifyContent="space-between"
paddingLeft={1}
paddingRight={1}
backgroundColor="#1a1a2e"
>
<box flexDirection="row" gap={1}>
<text fg="#ff69b4" attributes={TextAttributes.BOLD}>
{BRAIN_BANNER.EMOJI_CONNECTED}
</text>
<text fg="#ffffff" attributes={TextAttributes.BOLD}>
{BRAIN_BANNER.TITLE}
</text>
<text fg={theme.colors.textDim}>-</text>
<text fg={theme.colors.textDim}>{BRAIN_BANNER.CTA}:</text>
<text fg={theme.colors.info} attributes={TextAttributes.UNDERLINE}>
{BRAIN_BANNER.URL}
</text>
</box>
</Show>
<text fg={theme.colors.textDim}>[Ctrl+B dismiss]</text>
</box>
</Show>
{/* Main Header */}
<box
flexDirection="row"
justifyContent="space-between"
paddingLeft={1}
paddingRight={1}
borderColor={theme.colors.border}
border={["bottom"]}
>
<box flexDirection="row" gap={1}>
<Show when={showBanner()}>
<text fg={theme.colors.primary} attributes={TextAttributes.BOLD}>
CodeTyper
</text>
</Show>
<text fg={theme.colors.textDim}>v{app.version()}</text>
<text fg={theme.colors.textDim}>|</text>
<box flexDirection="row">
<text fg={modeColor()} attributes={TextAttributes.BOLD}>
[{MODE_LABELS[app.interactionMode()]}]
</text>
<Show when={app.currentAgent() !== "default"}>
<text fg={theme.colors.secondary} attributes={TextAttributes.BOLD}>
{" "}
@{app.currentAgent()}
</text>
</Show>
<text fg={theme.colors.textDim}>
{" "}
- {MODE_DESCRIPTIONS[app.interactionMode()]}
</text>
</box>
</box>
<box flexDirection="row" gap={2}>
{/* Context Window Usage */}
<Show when={contextUsage().max > 0}>
<box flexDirection="row">
<text fg={tokenColor()}>
{formatTokenCount(contextUsage().current)}
</text>
<text fg={theme.colors.textDim}>/</text>
<text fg={theme.colors.textDim}>
{formatTokenCount(contextUsage().max)}
</text>
<Show when={contextUsage().status === "compacting"}>
<text fg={theme.colors.info}> [compacting]</text>
</Show>
</box>
</Show>
{/* Brain Status Indicator - hidden when BRAIN_DISABLED */}
<Show when={!BRAIN_DISABLED}>
<box flexDirection="row">
<text fg={brainColor()}>
{app.brain().status === "connected"
? BRAIN_BANNER.EMOJI_CONNECTED
: app.brain().status === "connecting"
? "..."
: BRAIN_BANNER.EMOJI_DISCONNECTED}
</text>
<Show when={app.brain().status === "connected"}>
<text fg={theme.colors.textDim}>
{" "}
{app.brain().knowledgeCount}K/{app.brain().memoryCount}M
</text>
</Show>
</box>
</Show>
<box flexDirection="row">
<text fg={theme.colors.textDim}>Provider: </text>
<text fg={theme.colors.secondary}>{app.provider()}</text>
</box>
<box flexDirection="row">
<text fg={theme.colors.textDim}>Model: </text>
<text fg={theme.colors.accent}>{app.model() || "auto"}</text>
</box>
<Show when={app.sessionId()}>
<box flexDirection="row">
<text fg={theme.colors.textDim}>Session: </text>
<text fg={theme.colors.info}>
{app.sessionId()?.replace("session-", "").slice(-5)}
</text>
</box>
</Show>
</box>
</box>
</box>
);

View File

@@ -94,7 +94,11 @@ export function InputArea(props: InputAreaProps) {
mode === "permission_prompt" ||
mode === "learning_prompt" ||
mode === "help_menu" ||
mode === "help_detail"
mode === "help_detail" ||
mode === "brain_menu" ||
mode === "brain_login" ||
mode === "provider_select" ||
mode === "mcp_browse"
);
});
const placeholder = () =>
@@ -108,10 +112,10 @@ export function InputArea(props: InputAreaProps) {
// Handle "/" to open command menu when input is empty
// Handle Enter to submit (backup in case onSubmit doesn't fire)
// Handle Ctrl+Tab to toggle interaction mode
// Handle Ctrl+M to toggle interaction mode (Ctrl+Tab doesn't work in most terminals)
useKeyboard((evt) => {
// Ctrl+Tab works even when locked or menus are open
if (evt.ctrl && evt.name === "tab") {
// Ctrl+M works even when locked or menus are open
if (evt.ctrl && evt.name === "m") {
app.toggleInteractionMode();
evt.preventDefault();
evt.stopPropagation();

View File

@@ -4,6 +4,7 @@ import type { ScrollBoxRenderable } from "@opentui/core";
import { useTheme } from "@tui-solid/context/theme";
import { useAppStore } from "@tui-solid/context/app";
import { LogEntryDisplay } from "@tui-solid/components/log-entry";
import { ASCII_LOGO, ASCII_LOGO_GRADIENT, HOME_VARS } from "@constants/home";
const SCROLL_LINES = 3;
const MOUSE_ENABLE = "\x1b[?1000h\x1b[?1006h";
@@ -141,10 +142,22 @@ export function LogPanel() {
<Show
when={hasContent()}
fallback={
<box flexGrow={1} alignItems="center" justifyContent="center">
<text fg={theme.colors.textDim}>
No messages yet. Type your prompt below.
</text>
<box
flexGrow={1}
alignItems="center"
justifyContent="center"
flexDirection="column"
>
<For each={ASCII_LOGO}>
{(line, index) => (
<text fg={ASCII_LOGO_GRADIENT[index()] ?? theme.colors.primary}>
{line}
</text>
)}
</For>
<box marginTop={2}>
<text fg={theme.colors.textDim}>{HOME_VARS.subTitle}</text>
</box>
</box>
}
>

View File

@@ -155,11 +155,6 @@ export function StatusBar() {
const hints = createMemo(() => {
const result: string[] = [];
// Show mode toggle hint when idle
if (!isProcessing()) {
result.push("^Tab toggle mode");
}
if (isProcessing()) {
result.push(
app.interruptPending()
@@ -168,10 +163,6 @@ export function StatusBar() {
);
}
if (app.todosVisible()) {
result.push(STATUS_HINTS.TOGGLE_TODOS);
}
result.push(formatDuration(elapsed()));
if (totalTokens() > 0) {

View File

@@ -16,6 +16,7 @@ import type {
SuggestionState,
} from "@/types/tui";
import type { ProviderModel } from "@/types/providers";
import type { BrainConnectionStatus, BrainUser } from "@/types/brain";
interface AppStore {
mode: AppMode;
@@ -44,6 +45,13 @@ interface AppStore {
streamingLog: StreamingLogState;
suggestions: SuggestionState;
cascadeEnabled: boolean;
brain: {
status: BrainConnectionStatus;
user: BrainUser | null;
knowledgeCount: number;
memoryCount: number;
showBanner: boolean;
};
}
interface AppContextValue {
@@ -81,6 +89,13 @@ interface AppContextValue {
streamingLogIsActive: Accessor<boolean>;
suggestions: Accessor<SuggestionState>;
cascadeEnabled: Accessor<boolean>;
brain: Accessor<{
status: BrainConnectionStatus;
user: BrainUser | null;
knowledgeCount: number;
memoryCount: number;
showBanner: boolean;
}>;
// Mode actions
setMode: (mode: AppMode) => void;
@@ -138,6 +153,7 @@ interface AppContextValue {
stopThinking: () => void;
addTokens: (input: number, output: number) => void;
resetSessionStats: () => void;
setContextMaxTokens: (maxTokens: number) => void;
// UI state actions
toggleTodos: () => void;
@@ -161,6 +177,13 @@ interface AppContextValue {
hideSuggestions: () => void;
showSuggestions: () => void;
// Brain actions
setBrainStatus: (status: BrainConnectionStatus) => void;
setBrainUser: (user: BrainUser | null) => void;
setBrainCounts: (knowledge: number, memory: number) => void;
setBrainShowBanner: (show: boolean) => void;
dismissBrainBanner: () => void;
// Computed
isInputLocked: () => boolean;
}
@@ -174,6 +197,7 @@ const createInitialSessionStats = (): SessionStats => ({
outputTokens: 0,
thinkingStartTime: null,
lastThinkingDuration: 0,
contextMaxTokens: 128000, // Default, updated when model is selected
});
const createInitialStreamingState = (): StreamingLogState => ({
@@ -225,6 +249,13 @@ export const { provider: AppStoreProvider, use: useAppStore } =
streamingLog: createInitialStreamingState(),
suggestions: createInitialSuggestionState(),
cascadeEnabled: true,
brain: {
status: "disconnected" as BrainConnectionStatus,
user: null,
knowledgeCount: 0,
memoryCount: 0,
showBanner: true,
},
});
// Input insert function (set by InputArea)
@@ -272,6 +303,7 @@ export const { provider: AppStoreProvider, use: useAppStore } =
const streamingLogIsActive = (): boolean => store.streamingLog.isStreaming;
const suggestions = (): SuggestionState => store.suggestions;
const cascadeEnabled = (): boolean => store.cascadeEnabled;
const brain = () => store.brain;
// Mode actions
const setMode = (newMode: AppMode): void => {
@@ -469,6 +501,27 @@ export const { provider: AppStoreProvider, use: useAppStore } =
setStore("cascadeEnabled", !store.cascadeEnabled);
};
// Brain actions
const setBrainStatus = (status: BrainConnectionStatus): void => {
setStore("brain", { ...store.brain, status });
};
const setBrainUser = (user: BrainUser | null): void => {
setStore("brain", { ...store.brain, user });
};
const setBrainCounts = (knowledgeCount: number, memoryCount: number): void => {
setStore("brain", { ...store.brain, knowledgeCount, memoryCount });
};
const setBrainShowBanner = (showBanner: boolean): void => {
setStore("brain", { ...store.brain, showBanner });
};
const dismissBrainBanner = (): void => {
setStore("brain", { ...store.brain, showBanner: false });
};
// Session stats actions
const startThinking = (): void => {
setStore("sessionStats", {
@@ -502,6 +555,13 @@ export const { provider: AppStoreProvider, use: useAppStore } =
setStore("sessionStats", createInitialSessionStats());
};
const setContextMaxTokens = (maxTokens: number): void => {
setStore("sessionStats", {
...store.sessionStats,
contextMaxTokens: maxTokens,
});
};
// UI state actions
const toggleTodos = (): void => {
setStore("todosVisible", !store.todosVisible);
@@ -698,6 +758,7 @@ export const { provider: AppStoreProvider, use: useAppStore } =
streamingLogIsActive,
suggestions,
cascadeEnabled,
brain,
// Mode actions
setMode,
@@ -752,11 +813,19 @@ export const { provider: AppStoreProvider, use: useAppStore } =
setCascadeEnabled,
toggleCascadeEnabled,
// Brain actions
setBrainStatus,
setBrainUser,
setBrainCounts,
setBrainShowBanner,
dismissBrainBanner,
// Session stats actions
startThinking,
stopThinking,
addTokens,
resetSessionStats,
setContextMaxTokens,
// UI state actions
toggleTodos,
@@ -818,6 +887,13 @@ const defaultAppState = {
isCompacting: false,
streamingLog: createInitialStreamingState(),
suggestions: createInitialSuggestionState(),
brain: {
status: "disconnected" as BrainConnectionStatus,
user: null,
knowledgeCount: 0,
memoryCount: 0,
showBanner: true,
},
};
export const appStore = {
@@ -850,6 +926,7 @@ export const appStore = {
isCompacting: storeRef.isCompacting(),
streamingLog: storeRef.streamingLog(),
suggestions: storeRef.suggestions(),
brain: storeRef.brain(),
};
},
@@ -958,6 +1035,11 @@ export const appStore = {
storeRef.resetSessionStats();
},
setContextMaxTokens: (maxTokens: number): void => {
if (!storeRef) return;
storeRef.setContextMaxTokens(maxTokens);
},
toggleTodos: (): void => {
if (!storeRef) return;
storeRef.toggleTodos();
@@ -1027,4 +1109,29 @@ export const appStore = {
if (!storeRef) return;
storeRef.toggleCascadeEnabled();
},
setBrainStatus: (status: BrainConnectionStatus): void => {
if (!storeRef) return;
storeRef.setBrainStatus(status);
},
setBrainUser: (user: BrainUser | null): void => {
if (!storeRef) return;
storeRef.setBrainUser(user);
},
setBrainCounts: (knowledge: number, memory: number): void => {
if (!storeRef) return;
storeRef.setBrainCounts(knowledge, memory);
},
setBrainShowBanner: (show: boolean): void => {
if (!storeRef) return;
storeRef.setBrainShowBanner(show);
},
dismissBrainBanner: (): void => {
if (!storeRef) return;
storeRef.dismissBrainBanner();
},
};

View File

@@ -53,8 +53,7 @@ export function Home(props: HomeProps) {
>
<Logo />
<box marginTop={2} flexDirection="column" alignItems="center">
<text fg={theme.colors.textDim}>{HOME_VARS.title}</text>
<box marginTop={2}>
<text fg={theme.colors.textDim}>{HOME_VARS.subTitle}</text>
</box>
</box>

View File

@@ -21,6 +21,8 @@ import { HelpDetail } from "@tui-solid/components/help-detail";
import { TodoPanel } from "@tui-solid/components/todo-panel";
import { CenteredModal } from "@tui-solid/components/centered-modal";
import { DebugLogPanel } from "@tui-solid/components/debug-log-panel";
import { BrainMenu } from "@tui-solid/components/brain-menu";
import { BRAIN_DISABLED } from "@constants/brain";
import type { PermissionScope, LearningScope, InteractionMode } from "@/types/tui";
import type { MCPAddFormData } from "@/types/mcp";
@@ -60,6 +62,9 @@ interface SessionProps {
scope?: LearningScope,
editedContent?: string,
) => void;
onBrainSetJwtToken?: (jwtToken: string) => Promise<void>;
onBrainSetApiKey?: (apiKey: string) => Promise<void>;
onBrainLogout?: () => Promise<void>;
plan?: {
id: string;
title: string;
@@ -113,6 +118,10 @@ export function Session(props: SessionProps) {
app.transitionFromCommandMenu("help_menu");
return;
}
if (lowerCommand === "brain" && !BRAIN_DISABLED) {
app.transitionFromCommandMenu("brain_menu");
return;
}
// For other commands, close menu and process through handler
app.closeCommandMenu();
props.onCommand(command);
@@ -192,6 +201,22 @@ export function Session(props: SessionProps) {
app.setMode("idle");
};
const handleBrainMenuClose = (): void => {
app.setMode("idle");
};
const handleBrainSetJwtToken = async (jwtToken: string): Promise<void> => {
await props.onBrainSetJwtToken?.(jwtToken);
};
const handleBrainSetApiKey = async (apiKey: string): Promise<void> => {
await props.onBrainSetApiKey?.(apiKey);
};
const handleBrainLogout = async (): Promise<void> => {
await props.onBrainLogout?.();
};
return (
<box
flexDirection="column"
@@ -362,6 +387,18 @@ export function Session(props: SessionProps) {
/>
</CenteredModal>
</Match>
<Match when={app.mode() === "brain_menu" && !BRAIN_DISABLED}>
<CenteredModal>
<BrainMenu
onSetJwtToken={handleBrainSetJwtToken}
onSetApiKey={handleBrainSetApiKey}
onLogout={handleBrainLogout}
onClose={handleBrainMenuClose}
isActive={app.mode() === "brain_menu"}
/>
</CenteredModal>
</Match>
</Switch>
</box>
);

View File

@@ -24,7 +24,6 @@ import {
MCPBrowser,
TodoPanel,
FilePicker,
HomeContent,
SessionHeader,
} from "@tui/components/index";
import { InputLine, calculateLineStartPos } from "@tui/components/input-line";
@@ -88,6 +87,8 @@ export function App({
const exitPending = useAppStore((state) => state.exitPending);
const setExitPending = useAppStore((state) => state.setExitPending);
const toggleTodos = useAppStore((state) => state.toggleTodos);
const toggleInteractionMode = useAppStore((state) => state.toggleInteractionMode);
const interactionMode = useAppStore((state) => state.interactionMode);
const startThinking = useAppStore((state) => state.startThinking);
const stopThinking = useAppStore((state) => state.stopThinking);
const scrollUp = useAppStore((state) => state.scrollUp);
@@ -98,6 +99,8 @@ export function App({
const setScreenMode = useAppStore((state) => state.setScreenMode);
const logs = useAppStore((state) => state.logs);
const sessionStats = useAppStore((state) => state.sessionStats);
const brain = useAppStore((state) => state.brain);
const dismissBrainBanner = useAppStore((state) => state.dismissBrainBanner);
// Local input state
const [inputBuffer, setInputBuffer] = useState("");
@@ -356,9 +359,24 @@ export function App({
// Global input handler for Ctrl+C, Ctrl+D, Ctrl+T, scroll (always active)
useInput((input, key) => {
// Handle Ctrl+T to toggle todos visibility
// Handle Ctrl+M to toggle interaction mode (Ctrl+Tab doesn't work in most terminals)
if (key.ctrl && input === "m") {
toggleInteractionMode();
// Note: The log will show the new mode after toggle
const newMode = useAppStore.getState().interactionMode;
addLog({
type: "system",
content: `Switched to ${newMode} mode (Ctrl+M)`,
});
return;
}
// Handle Ctrl+T to toggle todos visibility (only in agent/code-review modes)
if (key.ctrl && input === "t") {
toggleTodos();
const currentMode = useAppStore.getState().interactionMode;
if (currentMode === "agent" || currentMode === "code-review") {
toggleTodos();
}
return;
}
@@ -548,8 +566,8 @@ export function App({
pastedBlocks: updatedBlocks,
}));
}
} else if (input === "v") {
// Handle Ctrl+V for image paste
} else if (input === "v" || input === "\x16") {
// Handle Ctrl+V for image paste (v or raw control character)
readClipboardImage().then((image) => {
if (image) {
setPasteState((prev) => ({
@@ -562,12 +580,31 @@ export function App({
});
}
});
} else if (input === "i") {
// Handle Ctrl+I as alternative for image paste
readClipboardImage().then((image) => {
if (image) {
setPasteState((prev) => ({
...prev,
pastedImages: [...prev.pastedImages, image],
}));
addLog({
type: "system",
content: `Image attached (${image.mediaType})`,
});
} else {
addLog({
type: "system",
content: "No image found in clipboard",
});
}
});
}
return;
}
// Handle Cmd+V (macOS) for image paste
if (key.meta && input === "v") {
if (key.meta && (input === "v" || input === "\x16")) {
readClipboardImage().then((image) => {
if (image) {
setPasteState((prev) => ({
@@ -662,36 +699,31 @@ export function App({
// Calculate token count for session header
const totalTokens = sessionStats.inputTokens + sessionStats.outputTokens;
const isHomeMode = screenMode === "home" && logs.length === 0;
return (
<Box flexDirection="column" height="100%">
{/* Show session header only when in session mode */}
{!isHomeMode && (
<>
<SessionHeader
title={sessionId ?? "New session"}
tokenCount={totalTokens}
contextPercentage={15}
cost={0}
version={version}
/>
<StatusBar />
</>
)}
{/* Always show session header and status bar */}
<SessionHeader
title={sessionId ?? "New session"}
tokenCount={totalTokens}
contextPercentage={15}
cost={0}
version={version}
interactionMode={interactionMode}
brain={brain}
onDismissBrainBanner={dismissBrainBanner}
/>
<StatusBar />
<Box flexDirection="column" flexGrow={1}>
{/* Show home content or session content */}
{isHomeMode ? (
<HomeContent provider={provider} model={model} version={version} />
) : (
<Box flexDirection="row" flexGrow={1}>
<Box flexDirection="column" flexGrow={1}>
<LogPanel />
</Box>
<TodoPanel />
{/* Main content area with all panes */}
<Box flexDirection="row" flexGrow={1}>
<Box flexDirection="column" flexGrow={1}>
{/* LogPanel shows logo when empty, logs otherwise */}
<LogPanel />
</Box>
)}
<TodoPanel />
</Box>
<PermissionModal />
<LearningModal />
@@ -736,6 +768,7 @@ export function App({
{isMCPSelectOpen && (
<MCPSelect
onClose={handleMCPSelectClose}
onBrowse={() => setMode("mcp_browse")}
isActive={isMCPSelectOpen}
/>
)}
@@ -808,7 +841,7 @@ export function App({
<Box marginTop={1}>
<Text dimColor>
Enter to send Alt+Enter for newline @ to add files Ctrl+V to paste image
Enter @ files Ctrl+M mode Ctrl+I image
</Text>
</Box>
</Box>

View File

@@ -2,9 +2,10 @@
* CommandMenu Component - Slash command selection menu
*
* Shows when user types '/' and provides filterable command list
* Supports scrolling for small terminal windows
*/
import React, { useMemo } from "react";
import React, { useMemo, useState, useEffect } from "react";
import { Box, Text, useInput } from "ink";
import { useAppStore } from "@tui/store";
import type {
@@ -17,6 +18,9 @@ import { SLASH_COMMANDS, COMMAND_CATEGORIES } from "@constants/tui-components";
// Re-export for backwards compatibility
export { SLASH_COMMANDS } from "@constants/tui-components";
// Maximum visible items before scrolling
const MAX_VISIBLE = 12;
interface CommandWithIndex extends SlashCommand {
flatIndex: number;
}
@@ -58,12 +62,29 @@ export function CommandMenu({
(state) => state.setCommandSelectedIndex,
);
// Scroll offset for viewport
const [scrollOffset, setScrollOffset] = useState(0);
// Filter commands based on input
const filteredCommands = useMemo(
() => filterCommands(SLASH_COMMANDS, commandMenu.filter),
[commandMenu.filter],
);
// Reset scroll when filter changes
useEffect(() => {
setScrollOffset(0);
}, [commandMenu.filter]);
// Ensure selected index is visible
useEffect(() => {
if (commandMenu.selectedIndex < scrollOffset) {
setScrollOffset(commandMenu.selectedIndex);
} else if (commandMenu.selectedIndex >= scrollOffset + MAX_VISIBLE) {
setScrollOffset(commandMenu.selectedIndex - MAX_VISIBLE + 1);
}
}, [commandMenu.selectedIndex, scrollOffset]);
// Handle keyboard input
useInput(
(input, key) => {
@@ -81,7 +102,6 @@ export function CommandMenu({
if (filteredCommands.length > 0) {
const selected = filteredCommands[commandMenu.selectedIndex];
if (selected) {
// handleCommandSelect will close the menu
onSelect(selected.name);
}
}
@@ -113,7 +133,6 @@ export function CommandMenu({
if (filteredCommands.length > 0) {
const selected = filteredCommands[commandMenu.selectedIndex];
if (selected) {
// handleCommandSelect will close the menu
onSelect(selected.name);
}
}
@@ -154,6 +173,32 @@ export function CommandMenu({
})),
);
// Calculate visible items with scroll
const totalItems = filteredCommands.length;
const hasScrollUp = scrollOffset > 0;
const hasScrollDown = scrollOffset + MAX_VISIBLE < totalItems;
// Get visible commands
const visibleCommands = commandsWithIndex.slice(
scrollOffset,
scrollOffset + MAX_VISIBLE,
);
// Group visible commands by category for display
const visibleGrouped: Array<{
category: CommandCategory;
commands: CommandWithIndex[];
}> = [];
for (const cmd of visibleCommands) {
const existingGroup = visibleGrouped.find((g) => g.category === cmd.category);
if (existingGroup) {
existingGroup.commands.push(cmd);
} else {
visibleGrouped.push({ category: cmd.category, commands: [cmd] });
}
}
return (
<Box
flexDirection="column"
@@ -168,23 +213,26 @@ export function CommandMenu({
</Text>
{commandMenu.filter && <Text dimColor> - filtering: </Text>}
{commandMenu.filter && <Text color="yellow">{commandMenu.filter}</Text>}
<Text dimColor> ({totalItems})</Text>
</Box>
{hasScrollUp && (
<Box justifyContent="center">
<Text color="gray"> more ({scrollOffset} above)</Text>
</Box>
)}
{filteredCommands.length === 0 ? (
<Text dimColor>No commands match "{commandMenu.filter}"</Text>
) : (
<Box flexDirection="column">
{groupedCommands.map((group) => (
{visibleGrouped.map((group) => (
<Box key={group.category} flexDirection="column" marginBottom={1}>
<Text dimColor bold>
{capitalizeCategory(group.category)}
</Text>
{group.commands.map((cmd) => {
const cmdWithIndex = commandsWithIndex.find(
(c) => c.name === cmd.name,
);
const isSelected =
cmdWithIndex?.flatIndex === commandMenu.selectedIndex;
const isSelected = cmd.flatIndex === commandMenu.selectedIndex;
return (
<Box key={cmd.name}>
<Text
@@ -205,6 +253,12 @@ export function CommandMenu({
</Box>
)}
{hasScrollDown && (
<Box justifyContent="center">
<Text color="gray"> more ({totalItems - scrollOffset - MAX_VISIBLE} below)</Text>
</Box>
)}
<Box marginTop={1}>
<Text dimColor>
Esc to close | Enter/Tab to select | Type to filter

View File

@@ -18,15 +18,19 @@ import type { MCPServerInstance, MCPServerConfig } from "@/types/mcp";
interface MCPSelectProps {
onClose: () => void;
onBrowse?: () => void;
isActive?: boolean;
}
type MenuMode = "list" | "add_name" | "add_command" | "add_args";
type ActionType = "add" | "browse" | "search" | "popular";
interface MenuItem {
id: string;
name: string;
type: "server" | "action";
actionType?: ActionType;
server?: MCPServerInstance;
config?: MCPServerConfig;
}
@@ -42,6 +46,7 @@ const STATE_COLORS: Record<string, string> = {
export function MCPSelect({
onClose,
onBrowse,
isActive = true,
}: MCPSelectProps): React.ReactElement {
const [servers, setServers] = useState<Map<string, MCPServerInstance>>(
@@ -76,11 +81,26 @@ export function MCPSelect({
const menuItems = useMemo((): MenuItem[] => {
const items: MenuItem[] = [];
// Add "Add new server" action
// Add action items first
items.push({
id: "__browse__",
name: "🔍 Browse & Search servers",
type: "action",
actionType: "browse",
});
items.push({
id: "__popular__",
name: "⭐ Popular servers",
type: "action",
actionType: "popular",
});
items.push({
id: "__add__",
name: "+ Add new MCP server",
name: "+ Add server manually",
type: "action",
actionType: "add",
});
// Add servers
@@ -231,9 +251,36 @@ export function MCPSelect({
if (filteredItems.length > 0) {
const selected = filteredItems[selectedIndex];
if (selected) {
if (selected.type === "action" && selected.id === "__add__") {
setMode("add_name");
setMessage(null);
if (selected.type === "action") {
const actionHandlers: Record<ActionType, () => void> = {
add: () => {
setMode("add_name");
setMessage(null);
},
browse: () => {
if (onBrowse) {
onBrowse();
} else {
onClose();
}
},
popular: () => {
if (onBrowse) {
onBrowse();
} else {
onClose();
}
},
search: () => {
if (onBrowse) {
onBrowse();
} else {
onClose();
}
},
};
const handler = actionHandlers[selected.actionType || "add"];
handler();
} else if (selected.type === "server") {
toggleServer(selected);
}

View File

@@ -1,12 +1,13 @@
/**
* SessionHeader Component
* Header showing session title, token count, cost, and version
* Header showing session title, token count, cost, version, and brain status
*/
import React from "react";
import { Box, Text } from "ink";
import { useThemeColors } from "@tui/hooks/useThemeStore";
import type { SessionHeaderProps } from "@types/home-screen";
import { BRAIN_BANNER } from "@constants/brain";
const formatCost = (cost: number): string => {
return new Intl.NumberFormat("en-US", {
@@ -22,12 +23,41 @@ const formatTokenCount = (count: number): string => {
return count.toLocaleString();
};
const MODE_COLORS: Record<string, string> = {
agent: "cyan",
ask: "green",
"code-review": "yellow",
};
const MODE_LABELS: Record<string, string> = {
agent: "AGENT",
ask: "ASK",
"code-review": "REVIEW",
};
const BRAIN_STATUS_COLORS: Record<string, string> = {
connected: "green",
connecting: "yellow",
disconnected: "gray",
error: "red",
};
const BRAIN_STATUS_ICONS: Record<string, string> = {
connected: BRAIN_BANNER.EMOJI_CONNECTED,
connecting: "...",
disconnected: BRAIN_BANNER.EMOJI_DISCONNECTED,
error: "!",
};
export const SessionHeader: React.FC<SessionHeaderProps> = ({
title,
tokenCount,
contextPercentage,
cost,
version,
interactionMode = "agent",
brain,
onDismissBrainBanner,
}) => {
const colors = useThemeColors();
@@ -36,33 +66,94 @@ export const SessionHeader: React.FC<SessionHeaderProps> = ({
? `${formatTokenCount(tokenCount)} ${contextPercentage}%`
: formatTokenCount(tokenCount);
return (
<Box
flexShrink={0}
borderStyle="single"
borderLeft={true}
borderRight={false}
borderTop={false}
borderBottom={false}
borderColor={colors.border}
paddingTop={1}
paddingBottom={1}
paddingLeft={2}
paddingRight={1}
backgroundColor={colors.backgroundPanel}
>
<Box flexDirection="row" justifyContent="space-between" width="100%">
{/* Title */}
<Text color={colors.text}>
<Text bold>#</Text> <Text bold>{title}</Text>
</Text>
const modeColor = MODE_COLORS[interactionMode] || "cyan";
const modeLabel = MODE_LABELS[interactionMode] || interactionMode.toUpperCase();
{/* Context info and version */}
<Box flexDirection="row" gap={1} flexShrink={0}>
<Text color={colors.textDim}>
{contextInfo} ({formatCost(cost)})
</Text>
<Text color={colors.textDim}>v{version}</Text>
const brainStatus = brain?.status ?? "disconnected";
const brainColor = BRAIN_STATUS_COLORS[brainStatus] || "gray";
const brainIcon = BRAIN_STATUS_ICONS[brainStatus] || BRAIN_BANNER.EMOJI_DISCONNECTED;
const showBrainBanner = brain?.showBanner && brainStatus === "disconnected";
return (
<Box flexDirection="column" flexShrink={0}>
{/* Brain Banner - shown when not connected */}
{showBrainBanner && (
<Box
paddingLeft={2}
paddingRight={2}
paddingTop={0}
paddingBottom={0}
backgroundColor="#1a1a2e"
>
<Box flexDirection="row" justifyContent="space-between" width="100%">
<Box flexDirection="row" gap={1}>
<Text color="magenta" bold>
{BRAIN_BANNER.EMOJI_CONNECTED}
</Text>
<Text color="white" bold>
{BRAIN_BANNER.TITLE}
</Text>
<Text color="gray">
{" "}
- {BRAIN_BANNER.CTA}:{" "}
</Text>
<Text color="cyan" underline>
{BRAIN_BANNER.URL}
</Text>
</Box>
<Text color="gray" dimColor>
[press q to dismiss]
</Text>
</Box>
</Box>
)}
{/* Main Header */}
<Box
borderStyle="single"
borderLeft={true}
borderRight={false}
borderTop={false}
borderBottom={false}
borderColor={colors.border}
paddingTop={1}
paddingBottom={1}
paddingLeft={2}
paddingRight={1}
backgroundColor={colors.backgroundPanel}
>
<Box flexDirection="row" justifyContent="space-between" width="100%">
{/* Title and Mode */}
<Box flexDirection="row" gap={2}>
<Text color={colors.text}>
<Text bold>#</Text> <Text bold>{title}</Text>
</Text>
<Text color={modeColor} bold>
[{modeLabel}]
</Text>
</Box>
{/* Brain status, Context info and version */}
<Box flexDirection="row" gap={1} flexShrink={0}>
{/* Brain status indicator */}
{brain && (
<Box flexDirection="row" gap={0}>
<Text color={brainColor}>
{brainIcon}
</Text>
{brainStatus === "connected" && (
<Text color={colors.textDim}>
{" "}
{brain.knowledgeCount}K/{brain.memoryCount}M
</Text>
)}
</Box>
)}
<Text color={colors.textDim}>
{contextInfo} ({formatCost(cost)})
</Text>
<Text color={colors.textDim}>v{version}</Text>
</Box>
</Box>
</Box>
</Box>

View File

@@ -7,6 +7,7 @@
* - User scroll detection (scrolling up pauses auto-scroll)
* - Resume auto-scroll when user scrolls back to bottom
* - Virtual scrolling for performance
* - Shows logo and welcome screen when no logs
*/
import React, { useEffect, useRef, useMemo } from "react";
@@ -20,6 +21,7 @@ import {
import { LogEntryDisplay } from "@tui/components/log-panel/entry-renderers";
import { ThinkingIndicator } from "@tui/components/log-panel/thinking-indicator";
import { estimateEntryLines } from "@tui/components/log-panel/utils";
import { Logo } from "@tui/components/home/Logo";
export function LogPanel(): React.ReactElement {
const allLogs = useAppStore((state) => state.logs);
@@ -149,7 +151,15 @@ export function LogPanel(): React.ReactElement {
)}
{logs.length === 0 && !thinkingMessage ? (
<Text dimColor>No messages yet. Type your prompt below.</Text>
<Box flexDirection="column" flexGrow={1} alignItems="center" justifyContent="center">
<Logo />
<Box marginTop={1}>
<Text dimColor>AI Coding Assistant</Text>
</Box>
<Box marginTop={1}>
<Text dimColor>Type your prompt below Ctrl+M to switch modes</Text>
</Box>
</Box>
) : (
<Box flexDirection="column" flexGrow={1}>
{visibleEntries.map((entry) => (

View File

@@ -7,6 +7,7 @@ import type {
AppState,
AppMode,
ScreenMode,
InteractionMode,
LogEntry,
ToolCall,
PermissionRequest,
@@ -17,6 +18,15 @@ import type {
SuggestionPrompt,
} from "@/types/tui";
import type { ProviderModel } from "@/types/providers";
import type { BrainConnectionStatus, BrainUser } from "@/types/brain";
const createInitialBrainState = () => ({
status: "disconnected" as BrainConnectionStatus,
user: null as BrainUser | null,
knowledgeCount: 0,
memoryCount: 0,
showBanner: true,
});
const createInitialSessionStats = (): SessionStats => ({
startTime: Date.now(),
@@ -44,10 +54,13 @@ const generateLogId = (): string => {
return `log-${++logIdCounter}-${Date.now()}`;
};
const INTERACTION_MODES: InteractionMode[] = ["agent", "ask", "code-review"];
export const useAppStore = create<AppState>((set, get) => ({
// Initial state
mode: "idle",
screenMode: "home",
interactionMode: "agent" as InteractionMode,
inputBuffer: "",
inputCursorPosition: 0,
logs: [],
@@ -78,10 +91,17 @@ export const useAppStore = create<AppState>((set, get) => ({
visibleHeight: 20,
streamingLog: createInitialStreamingState(),
suggestions: createInitialSuggestionState(),
brain: createInitialBrainState(),
// Mode actions
setMode: (mode: AppMode) => set({ mode }),
setScreenMode: (screenMode: ScreenMode) => set({ screenMode }),
setInteractionMode: (interactionMode: InteractionMode) => set({ interactionMode }),
toggleInteractionMode: () => set((state) => {
const currentIndex = INTERACTION_MODES.indexOf(state.interactionMode);
const nextIndex = (currentIndex + 1) % INTERACTION_MODES.length;
return { interactionMode: INTERACTION_MODES[nextIndex] };
}),
// Input actions
setInputBuffer: (buffer: string) => set({ inputBuffer: buffer }),
@@ -451,6 +471,32 @@ export const useAppStore = create<AppState>((set, get) => ({
},
})),
// Brain actions
setBrainStatus: (status: BrainConnectionStatus) =>
set((state) => ({
brain: { ...state.brain, status },
})),
setBrainUser: (user: BrainUser | null) =>
set((state) => ({
brain: { ...state.brain, user },
})),
setBrainCounts: (knowledgeCount: number, memoryCount: number) =>
set((state) => ({
brain: { ...state.brain, knowledgeCount, memoryCount },
})),
setBrainShowBanner: (showBanner: boolean) =>
set((state) => ({
brain: { ...state.brain, showBanner },
})),
dismissBrainBanner: () =>
set((state) => ({
brain: { ...state.brain, showBanner: false },
})),
// Computed - check if input should be locked
isInputLocked: () => {
const { mode } = get();
@@ -544,6 +590,14 @@ export const appStore = {
useAppStore.getState().toggleTodos();
},
toggleInteractionMode: () => {
useAppStore.getState().toggleInteractionMode();
},
setInteractionMode: (mode: InteractionMode) => {
useAppStore.getState().setInteractionMode(mode);
},
setInterruptPending: (pending: boolean) => {
useAppStore.getState().setInterruptPending(pending);
},
@@ -606,4 +660,25 @@ export const appStore = {
resumeAutoScroll: () => {
useAppStore.getState().resumeAutoScroll();
},
// Brain
setBrainStatus: (status: BrainConnectionStatus) => {
useAppStore.getState().setBrainStatus(status);
},
setBrainUser: (user: BrainUser | null) => {
useAppStore.getState().setBrainUser(user);
},
setBrainCounts: (knowledge: number, memory: number) => {
useAppStore.getState().setBrainCounts(knowledge, memory);
},
setBrainShowBanner: (show: boolean) => {
useAppStore.getState().setBrainShowBanner(show);
},
dismissBrainBanner: () => {
useAppStore.getState().dismissBrainBanner();
},
};

View File

@@ -0,0 +1,91 @@
/**
 * Agent markdown definition types
 * Agents are defined in markdown files with YAML frontmatter
 * Location: .codetyper/agents/*.md
 */

/** Speed/cost tier of an agent; mapped to a concrete model via AGENT_TIER_MODELS. */
export type AgentTier = "fast" | "balanced" | "thorough";

/** Terminal color used when rendering the agent in the TUI. */
export type AgentColor =
  | "red"
  | "green"
  | "blue"
  | "yellow"
  | "cyan"
  | "magenta"
  | "white"
  | "gray";

/** Fully-resolved agent definition (frontmatter merged with defaults). */
export interface AgentDefinition {
  readonly name: string;
  readonly description: string;
  /** Tool names this agent is allowed to invoke. */
  readonly tools: ReadonlyArray<string>;
  readonly tier: AgentTier;
  readonly color: AgentColor;
  /** Maximum agent loop turns before the run stops. */
  readonly maxTurns?: number;
  /** Extra system prompt text (markdown body below the frontmatter). */
  readonly systemPrompt?: string;
  /** Phrases that select this agent — indexed in AgentRegistry.byTrigger. */
  readonly triggerPhrases?: ReadonlyArray<string>;
  readonly capabilities?: ReadonlyArray<string>;
  readonly permissions?: AgentPermissions;
}

/** Path/tool access control attached to an agent. */
export interface AgentPermissions {
  readonly allowedPaths?: ReadonlyArray<string>;
  readonly deniedPaths?: ReadonlyArray<string>;
  readonly allowedTools?: ReadonlyArray<string>;
  readonly deniedTools?: ReadonlyArray<string>;
  /** Tool names that require interactive approval. */
  readonly requireApproval?: ReadonlyArray<string>;
}

/** One parsed agent markdown file: raw pieces plus the resolved definition. */
export interface AgentDefinitionFile {
  readonly filePath: string;
  readonly frontmatter: AgentFrontmatter;
  /** Markdown body (text after the frontmatter). */
  readonly content: string;
  readonly parsed: AgentDefinition;
}

/** Raw YAML frontmatter shape before defaults are applied. */
export interface AgentFrontmatter {
  readonly name: string;
  readonly description: string;
  readonly tools: ReadonlyArray<string>;
  readonly tier?: AgentTier;
  readonly color?: AgentColor;
  readonly maxTurns?: number;
  readonly triggerPhrases?: ReadonlyArray<string>;
  readonly capabilities?: ReadonlyArray<string>;
  readonly allowedPaths?: ReadonlyArray<string>;
  readonly deniedPaths?: ReadonlyArray<string>;
}

/** Loaded agents plus lookup indexes (trigger phrase -> agent name, capability -> agent names). */
export interface AgentRegistry {
  readonly agents: ReadonlyMap<string, AgentDefinition>;
  readonly byTrigger: ReadonlyMap<string, string>;
  readonly byCapability: ReadonlyMap<string, ReadonlyArray<string>>;
}

/** Outcome of loading a single agent file. */
export interface AgentLoadResult {
  readonly success: boolean;
  readonly agent?: AgentDefinition;
  readonly error?: string;
  readonly filePath: string;
}

/**
 * Defaults merged into every agent definition.
 * `as const satisfies` validates against Partial<AgentDefinition> while
 * keeping the literal types (tier is "balanced", not just AgentTier).
 */
export const DEFAULT_AGENT_DEFINITION = {
  tier: "balanced",
  color: "cyan",
  maxTurns: 10,
  tools: ["read", "glob", "grep"],
  capabilities: [],
  triggerPhrases: [],
} as const satisfies Partial<AgentDefinition>;

/** Model used for each tier; `satisfies` catches a missing or misspelled tier key. */
export const AGENT_TIER_MODELS = {
  fast: "gpt-4o-mini",
  balanced: "gpt-4o",
  thorough: "o1",
} as const satisfies Record<AgentTier, string>;

/** Frontmatter field names used for validation; readonly so the schema cannot be mutated at runtime. */
export const AGENT_DEFINITION_SCHEMA: {
  readonly required: ReadonlyArray<string>;
  readonly optional: ReadonlyArray<string>;
} = {
  required: ["name", "description", "tools"],
  optional: ["tier", "color", "maxTurns", "triggerPhrases", "capabilities", "allowedPaths", "deniedPaths"],
};

145
src/types/apply-patch.ts Normal file
View File

@@ -0,0 +1,145 @@
/**
 * Apply Patch Types
 *
 * Types for unified diff parsing and application.
 * Supports fuzzy matching and rollback on failure.
 */

/**
 * Patch line type
 */
export type PatchLineType =
  | "context" // Unchanged line (starts with space)
  | "addition" // Added line (starts with +)
  | "deletion" // Removed line (starts with -)
  | "header"; // Hunk header

/**
 * Single line in a patch
 */
export interface PatchLine {
  type: PatchLineType;
  /** Line text — presumably without the leading +/-/space marker; confirm in the parser. */
  content: string;
  /** Line number in the original (pre-patch) file, when applicable. */
  originalLineNumber?: number;
  /** Line number in the patched (post-patch) file, when applicable. */
  newLineNumber?: number;
}

/**
 * Patch hunk (a contiguous block of changes)
 *
 * Start/count fields mirror the unified-diff hunk header:
 * `@@ -oldStart,oldLines +newStart,newLines @@`
 */
export interface PatchHunk {
  oldStart: number;
  oldLines: number;
  newStart: number;
  newLines: number;
  lines: PatchLine[];
  /** Raw `@@ ... @@` header text. */
  header: string;
}

/**
 * Parsed patch file for a single file
 */
export interface ParsedFilePatch {
  oldPath: string;
  newPath: string;
  hunks: PatchHunk[];
  /** Binary patches carry no textual hunks. */
  isBinary: boolean;
  /** File is created by the patch. */
  isNew: boolean;
  /** File is removed by the patch. */
  isDeleted: boolean;
  /** File is renamed — presumably oldPath !== newPath; confirm in the parser. */
  isRenamed: boolean;
}

/**
 * Complete parsed patch (may contain multiple files)
 */
export interface ParsedPatch {
  files: ParsedFilePatch[];
  /** Original patch text the files were parsed from. */
  rawPatch: string;
}

/**
 * Fuzzy match result
 */
export interface FuzzyMatchResult {
  found: boolean;
  /** Line at which the context matched (meaningful only when found). */
  lineNumber: number;
  /** Displacement from the hunk's expected position. */
  offset: number;
  /** Match quality — presumably in 0..1; confirm against the matcher. */
  confidence: number;
}

/**
 * Hunk application result
 */
export interface HunkApplicationResult {
  success: boolean;
  /** Index of this hunk within its file patch. */
  hunkIndex: number;
  /** Line at which the hunk was applied (set on success). */
  appliedAt?: number;
  error?: string;
  /** Displacement used when the hunk only matched after fuzzy search. */
  fuzzyOffset?: number;
}

/**
 * File patch result
 */
export interface FilePatchResult {
  success: boolean;
  filePath: string;
  hunksApplied: number;
  hunksFailed: number;
  /** Per-hunk details, parallel to the file's hunks. */
  hunkResults: HunkApplicationResult[];
  /** Patched file content — presumably only set on success; confirm in the applier. */
  newContent?: string;
  error?: string;
}

/**
 * Overall patch application result
 */
export interface ApplyPatchResult {
  success: boolean;
  filesPatched: number;
  filesFailed: number;
  fileResults: FilePatchResult[];
  /** Whether rollback data was captured for the touched files. */
  rollbackAvailable: boolean;
  error?: string;
}

/**
 * Apply patch parameters
 */
export interface ApplyPatchParams {
  /** Unified diff text to apply. */
  patch: string;
  /** When set, presumably restricts application to this file — confirm in the applier. */
  targetFile?: string;
  /** Parse and validate without writing changes. */
  dryRun?: boolean;
  /** Context mismatch tolerance when matching hunks (cf. patch(1) -F). */
  fuzz?: number;
  /** Apply the patch in reverse (undo). */
  reverse?: boolean;
}

/**
 * Rollback information
 *
 * Captured before patching so a failed application can be undone.
 */
export interface PatchRollback {
  filePath: string;
  /** Content before the patch was applied. */
  originalContent: string;
  /** Content after the patch was applied. */
  patchedContent: string;
  timestamp: number;
}

/**
 * Patch validation result
 */
export interface PatchValidationResult {
  valid: boolean;
  errors: string[];
  warnings: string[];
  fileCount: number;
  hunkCount: number;
}

/**
 * Context line match options
 */
export interface ContextMatchOptions {
  /** Allowed number of mismatched context lines. */
  fuzz: number;
  ignoreWhitespace: boolean;
  ignoreCase: boolean;
}

View File

@@ -0,0 +1,113 @@
/**
 * Background task types for async operations
 * Allows tasks to run in background while user continues working
 * Triggered with Ctrl+B or /background command
 */

/** Lifecycle state of a background task. */
export type BackgroundTaskStatus =
  | "pending"
  | "running"
  | "paused"
  | "completed"
  | "failed"
  | "cancelled";

/** Scheduling priority; numeric weights live in BACKGROUND_TASK_PRIORITIES. */
export type BackgroundTaskPriority = "low" | "normal" | "high";

/** A single background task and everything known about it. */
export interface BackgroundTask {
  readonly id: string;
  readonly name: string;
  readonly description: string;
  readonly status: BackgroundTaskStatus;
  readonly priority: BackgroundTaskPriority;
  /** Timestamps — presumably epoch ms from Date.now(); confirm at call sites. */
  readonly createdAt: number;
  readonly startedAt?: number;
  readonly completedAt?: number;
  readonly progress: TaskProgress;
  /** Set once the task has a final result. */
  readonly result?: TaskResult;
  /** Set when the task failed. */
  readonly error?: TaskError;
  readonly metadata: TaskMetadata;
}

/** Progress snapshot for a running task. */
export interface TaskProgress {
  readonly current: number;
  readonly total: number;
  readonly percentage: number;
  /** Human-readable status line for the UI. */
  readonly message: string;
  readonly steps: ReadonlyArray<TaskStep>;
}

/** One step within a task's progress breakdown. */
export interface TaskStep {
  readonly name: string;
  readonly status: BackgroundTaskStatus;
  readonly startedAt?: number;
  readonly completedAt?: number;
  readonly output?: string;
}

/** Final outcome of a completed task. */
export interface TaskResult {
  readonly success: boolean;
  readonly output: string;
  readonly artifacts: ReadonlyArray<TaskArtifact>;
  readonly summary: string;
}

/** Something a task produced (a file, diff, report, or raw data). */
export interface TaskArtifact {
  readonly type: "file" | "diff" | "report" | "data";
  readonly name: string;
  readonly path?: string;
  readonly content?: string;
}

/** Failure details for a task. */
export interface TaskError {
  readonly code: string;
  readonly message: string;
  readonly stack?: string;
  /** Whether the task may be retried — presumably consulted with retryOnFailure; confirm. */
  readonly recoverable: boolean;
}

/** Context the task was started with. */
export interface TaskMetadata {
  readonly sessionId: string;
  readonly agentId?: string;
  readonly prompt?: string;
  readonly tools: ReadonlyArray<string>;
  /** True when the user explicitly started the task (vs. auto-started). */
  readonly startedByUser: boolean;
}

/** Runtime configuration for the background task runner. */
export interface BackgroundTaskConfig {
  readonly maxConcurrent: number;
  /** Timeout per task — presumably ms, matching the 300000 default; confirm. */
  readonly defaultTimeout: number;
  readonly retryOnFailure: boolean;
  readonly maxRetries: number;
  readonly notifyOnComplete: boolean;
  readonly persistTasks: boolean;
}

/** Event emitted as a task changes state. */
export interface TaskNotification {
  readonly taskId: string;
  readonly type: "started" | "progress" | "completed" | "failed";
  readonly message: string;
  readonly timestamp: number;
}

/** In-memory store of all tasks, keyed by id, plus status buckets. */
export interface BackgroundTaskStore {
  readonly tasks: ReadonlyMap<string, BackgroundTask>;
  readonly queue: ReadonlyArray<string>;
  readonly running: ReadonlyArray<string>;
  readonly completed: ReadonlyArray<string>;
}

/**
 * Default runner configuration.
 * `as const satisfies` validates every key against BackgroundTaskConfig
 * while keeping the literal values in the exported type.
 */
export const DEFAULT_BACKGROUND_TASK_CONFIG = {
  maxConcurrent: 3,
  defaultTimeout: 300000, // 5 minutes
  retryOnFailure: false,
  maxRetries: 1,
  notifyOnComplete: true,
  persistTasks: true,
} as const satisfies BackgroundTaskConfig;

/** Numeric weight per priority (larger weight = higher priority, per the values chosen). */
export const BACKGROUND_TASK_PRIORITIES = {
  low: 1,
  normal: 5,
  high: 10,
} as const satisfies Record<BackgroundTaskPriority, number>;

194
src/types/brain-cloud.ts Normal file
View File

@@ -0,0 +1,194 @@
/**
 * Brain Cloud Sync Types
 *
 * Types for cloud synchronization of brain data.
 */

/**
 * Sync status
 */
export type BrainSyncStatus =
  | "synced"
  | "pending"
  | "syncing"
  | "conflict"
  | "offline"
  | "error";

/**
 * Conflict resolution strategy
 */
export type ConflictStrategy =
  | "local-wins"
  | "remote-wins"
  | "manual"
  | "merge";

/**
 * Sync direction
 */
export type SyncDirection = "push" | "pull" | "both";

/**
 * Sync operation type
 */
export type SyncOperationType =
  | "create"
  | "update"
  | "delete"
  | "conflict";

/**
 * Brain sync state
 */
export interface BrainSyncState {
  status: BrainSyncStatus;
  /** Timestamps — presumably epoch ms; null means never happened. Confirm at call sites. */
  lastSyncAt: number | null;
  lastPushAt: number | null;
  lastPullAt: number | null;
  /** Number of local changes not yet pushed. */
  pendingChanges: number;
  /** Number of unresolved conflicts. */
  conflictCount: number;
  syncErrors: string[];
}

/**
 * Cloud brain configuration
 */
export interface CloudBrainConfig {
  enabled: boolean;
  /** Base URL of the sync service. */
  endpoint: string;
  syncOnSessionEnd: boolean;
  /** Interval between periodic syncs — presumably ms; confirm. */
  syncInterval: number;
  conflictStrategy: ConflictStrategy;
  retryAttempts: number;
  /** Delay between retries — presumably ms; confirm. */
  retryDelay: number;
}

/**
 * Sync item representing a change
 */
export interface SyncItem {
  id: string;
  type: "concept" | "memory" | "relation";
  operation: SyncOperationType;
  /** Monotonic version counters used for conflict detection. */
  localVersion: number;
  remoteVersion?: number;
  /** Payload of the changed entity; shape depends on `type`. */
  data: unknown;
  timestamp: number;
  /** True once this change has been acknowledged by the server. */
  synced: boolean;
}

/**
 * Sync conflict
 *
 * Both sides changed the same item; holds both versions until resolved.
 */
export interface SyncConflict {
  id: string;
  itemId: string;
  itemType: "concept" | "memory" | "relation";
  localData: unknown;
  remoteData: unknown;
  localVersion: number;
  remoteVersion: number;
  localTimestamp: number;
  remoteTimestamp: number;
  resolved: boolean;
  /** Strategy used to resolve, set once resolved. */
  resolution?: ConflictStrategy;
  /** Winning/merged payload, set once resolved. */
  resolvedData?: unknown;
}

/**
 * Sync result
 */
export interface SyncResult {
  success: boolean;
  direction: SyncDirection;
  itemsSynced: number;
  itemsFailed: number;
  conflicts: SyncConflict[];
  errors: string[];
  /** Wall-clock duration of the sync — presumably ms; confirm. */
  duration: number;
  timestamp: number;
}

/**
 * Push request
 */
export interface PushRequest {
  items: SyncItem[];
  projectId: number;
  /** Client app version, for server-side compatibility checks. */
  clientVersion: string;
}

/**
 * Push response
 */
export interface PushResponse {
  success: boolean;
  accepted: number;
  rejected: number;
  conflicts: SyncConflict[];
  /** Server's version counter after applying the push. */
  serverVersion: number;
  errors?: string[];
}

/**
 * Pull request
 */
export interface PullRequest {
  projectId: number;
  /** Only fetch changes newer than this version/timestamp. */
  sinceVersion: number;
  sinceTimestamp: number;
  /** Page size; see PullResponse.hasMore. */
  limit?: number;
}

/**
 * Pull response
 */
export interface PullResponse {
  success: boolean;
  items: SyncItem[];
  serverVersion: number;
  /** More pages remain; issue another pull to continue. */
  hasMore: boolean;
  errors?: string[];
}

/**
 * Offline queue item
 *
 * A change waiting to be pushed once connectivity returns.
 */
export interface OfflineQueueItem {
  id: string;
  item: SyncItem;
  retryCount: number;
  /** Time of the last push attempt. */
  lastAttempt: number;
  /** Error from the last failed attempt. */
  error?: string;
}

/**
 * Offline queue state
 */
export interface OfflineQueueState {
  items: OfflineQueueItem[];
  /** Total queue size — presumably item count or bytes; confirm in the queue impl. */
  totalSize: number;
  /** Timestamp of the oldest queued item, or null when empty. */
  oldestItem: number | null;
}

/**
 * Sync progress event
 */
export interface SyncProgressEvent {
  phase: "preparing" | "pushing" | "pulling" | "resolving" | "completing";
  current: number;
  total: number;
  /** Human-readable progress line for the UI. */
  message: string;
}

/**
 * Sync options
 */
export interface SyncOptions {
  direction?: SyncDirection;
  /** Force a sync — exact semantics defined by the sync service; confirm there. */
  force?: boolean;
  /** Overrides the configured conflict strategy for this sync. */
  conflictStrategy?: ConflictStrategy;
  onProgress?: (event: SyncProgressEvent) => void;
  abortSignal?: AbortSignal;
}

Some files were not shown because too many files have changed in this diff Show More