Add BRAIN_DISABLED flag and fix Ollama tool call formatting

Features:
  - Add BRAIN_DISABLED feature flag to hide all Brain functionality
  - When enabled, hides Brain banner, status indicator, menu, and commands
  - Flag location: src/constants/brain.ts

  Fixes:
  - Fix Ollama 400 error by properly formatting tool_calls in messages
  - Update OllamaMessage type to include tool_calls field
  - Fix Brain menu keyboard not working (add missing modes to isMenuOpen)

  UI Changes:
  - Remove "^Tab toggle mode" hint from status bar
  - Remove "ctrl+t to hide todos" hint from status bar

  Files modified:
  - src/constants/brain.ts (add BRAIN_DISABLED flag)
  - src/types/ollama.ts (add tool_calls to OllamaMessage)
  - src/providers/ollama/chat.ts (format tool_calls in messages)
  - src/tui-solid/components/header.tsx (hide Brain UI when disabled)
  - src/tui-solid/components/status-bar.tsx (remove hints)
  - src/tui-solid/components/command-menu.tsx (filter brain command)
  - src/tui-solid/components/input-area.tsx (fix isMenuOpen modes)
  - src/tui-solid/routes/session.tsx (skip brain menu when disabled)
  - src/services/brain.ts (early return when disabled)
  - src/services/chat-tui/initialize.ts (skip brain init when disabled)
This commit is contained in:
2026-02-02 13:25:38 -05:00
parent 2eadda584a
commit c839fc4d68
114 changed files with 17243 additions and 273 deletions

View File

@@ -0,0 +1,366 @@
/**
* Apply Patch Execution
*
* Applies unified diff patches to files with fuzzy matching and rollback support.
*/
import fs from "fs/promises";
import { dirname, join, isAbsolute } from "path";
import {
PATCH_DEFAULTS,
PATCH_ERRORS,
PATCH_MESSAGES,
PATCH_TITLES,
} from "@constants/apply-patch";
import { parsePatch, validatePatch, getTargetPath, reversePatch } from "@tools/apply-patch/parser";
import { findHunkPosition, isHunkApplied, previewHunkApplication } from "@tools/apply-patch/matcher";
import type { ApplyPatchParams } from "@tools/apply-patch/params";
import type {
FilePatchResult,
HunkApplicationResult,
PatchRollback,
ParsedFilePatch,
} from "@/types/apply-patch";
import type { ToolContext, ToolResult } from "@tools/types";
// Rollback storage (in-memory for session).
// Maps target file path -> snapshot taken just before the last write/delete;
// entries are consumed by rollbackPatch() and cleared by clearRollbacks().
const rollbackStore: Map<string, PatchRollback> = new Map();
/**
* Execute the apply_patch tool
*/
export const executeApplyPatch = async (
params: ApplyPatchParams,
ctx: ToolContext,
): Promise<ToolResult> => {
try {
// Parse the patch
const parsedPatch = parsePatch(params.patch);
// Validate the patch
const validation = validatePatch(parsedPatch);
if (!validation.valid) {
return {
success: false,
title: PATCH_TITLES.FAILED,
output: "",
error: validation.errors.join("\n"),
};
}
// Apply to each file
const results: FilePatchResult[] = [];
let totalPatched = 0;
let totalFailed = 0;
for (let filePatch of parsedPatch.files) {
// Skip binary files
if (filePatch.isBinary) {
results.push({
success: true,
filePath: getTargetPath(filePatch),
hunksApplied: 0,
hunksFailed: 0,
hunkResults: [],
error: PATCH_MESSAGES.SKIPPED_BINARY(getTargetPath(filePatch)),
});
continue;
}
// Reverse if requested
if (params.reverse) {
filePatch = reversePatch(filePatch);
}
// Determine target file path
const targetPath = params.targetFile ?? getTargetPath(filePatch);
const absolutePath = isAbsolute(targetPath)
? targetPath
: join(ctx.workingDir, targetPath);
// Apply the file patch
const result = await applyFilePatch(
filePatch,
absolutePath,
{
fuzz: params.fuzz ?? PATCH_DEFAULTS.FUZZ,
dryRun: params.dryRun ?? false,
},
);
results.push(result);
if (result.success) {
totalPatched++;
} else {
totalFailed++;
}
}
// Build output
const output = formatPatchResults(results, params.dryRun ?? false);
// Determine overall success
const success = totalFailed === 0;
const title = params.dryRun
? PATCH_TITLES.DRY_RUN
: totalFailed === 0
? PATCH_TITLES.SUCCESS(totalPatched)
: totalPatched > 0
? PATCH_TITLES.PARTIAL(totalPatched, totalFailed)
: PATCH_TITLES.FAILED;
return {
success,
title,
output,
error: success ? undefined : `${totalFailed} file(s) failed to patch`,
};
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
return {
success: false,
title: PATCH_TITLES.FAILED,
output: "",
error: PATCH_ERRORS.PARSE_FAILED(message),
};
}
};
/**
 * Apply a patch to a single file
 *
 * Reads the current content (empty for new files), applies each hunk in
 * order with fuzzy position matching, and — unless dryRun — stores a
 * rollback snapshot in rollbackStore and writes the result to disk.
 *
 * NOTE(review): when some hunks apply and others fail, the partially
 * patched content is still written (hunksApplied > 0) even though the
 * returned result has success === false — confirm partial writes are
 * intended; rollbackPatch() is the escape hatch.
 */
const applyFilePatch = async (
  filePatch: ParsedFilePatch,
  targetPath: string,
  options: { fuzz: number; dryRun: boolean },
): Promise<FilePatchResult> => {
  const hunkResults: HunkApplicationResult[] = [];
  let currentContent: string;
  let originalContent: string;
  try {
    // Handle new files: start from empty content, nothing to read.
    if (filePatch.isNew) {
      currentContent = "";
      originalContent = "";
    } else {
      // Read original file
      try {
        currentContent = await fs.readFile(targetPath, "utf-8");
        originalContent = currentContent;
      } catch {
        // Any read failure is reported as "file not found"; all hunks fail.
        return {
          success: false,
          filePath: targetPath,
          hunksApplied: 0,
          hunksFailed: filePatch.hunks.length,
          hunkResults: [],
          error: PATCH_ERRORS.FILE_NOT_FOUND(targetPath),
        };
      }
    }
    // Handle deleted files: snapshot, unlink, and report one synthetic hunk.
    if (filePatch.isDeleted) {
      if (!options.dryRun) {
        // Store rollback info before removing the file
        rollbackStore.set(targetPath, {
          filePath: targetPath,
          originalContent,
          patchedContent: "",
          timestamp: Date.now(),
        });
        await fs.unlink(targetPath);
      }
      return {
        success: true,
        filePath: targetPath,
        hunksApplied: 1,
        hunksFailed: 0,
        hunkResults: [
          {
            success: true,
            hunkIndex: 0,
            appliedAt: 0,
          },
        ],
        newContent: "",
      };
    }
    // Apply each hunk in order; later hunks see earlier hunks' edits.
    let hunksApplied = 0;
    let hunksFailed = 0;
    for (let i = 0; i < filePatch.hunks.length; i++) {
      const hunk = filePatch.hunks[i];
      // Already applied? Count it as applied without re-applying (idempotence).
      if (isHunkApplied(currentContent, hunk, { fuzz: options.fuzz })) {
        hunkResults.push({
          success: true,
          hunkIndex: i,
          appliedAt: hunk.oldStart - 1,
        });
        hunksApplied++;
        continue;
      }
      // Find position with fuzzy matching
      const position = findHunkPosition(currentContent, hunk, { fuzz: options.fuzz });
      if (!position.found) {
        hunkResults.push({
          success: false,
          hunkIndex: i,
          error: PATCH_ERRORS.FUZZY_MATCH_FAILED(i),
        });
        hunksFailed++;
        continue;
      }
      // Apply the hunk in memory
      const preview = previewHunkApplication(currentContent, hunk, position.lineNumber);
      if (!preview.success) {
        hunkResults.push({
          success: false,
          hunkIndex: i,
          error: preview.error ?? PATCH_ERRORS.HUNK_FAILED(i, "unknown"),
        });
        hunksFailed++;
        continue;
      }
      currentContent = preview.preview.join("\n");
      hunksApplied++;
      hunkResults.push({
        success: true,
        hunkIndex: i,
        appliedAt: position.lineNumber,
        // Only record an offset when the hunk landed away from its header line.
        fuzzyOffset: position.offset !== 0 ? position.offset : undefined,
      });
    }
    // Write the file if not dry run and at least one hunk changed content.
    if (!options.dryRun && hunksApplied > 0) {
      // Store rollback info
      rollbackStore.set(targetPath, {
        filePath: targetPath,
        originalContent,
        patchedContent: currentContent,
        timestamp: Date.now(),
      });
      // Ensure directory exists (new files may live in new directories)
      await fs.mkdir(dirname(targetPath), { recursive: true });
      // Write patched content
      await fs.writeFile(targetPath, currentContent, "utf-8");
    }
    return {
      success: hunksFailed === 0,
      filePath: targetPath,
      hunksApplied,
      hunksFailed,
      hunkResults,
      newContent: currentContent,
    };
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    return {
      success: false,
      filePath: targetPath,
      hunksApplied: 0,
      hunksFailed: filePatch.hunks.length,
      hunkResults,
      error: PATCH_ERRORS.WRITE_FAILED(targetPath, message),
    };
  }
};
/**
 * Render per-file patch results into a human-readable summary, with a
 * trailing dry-run notice or rollback hint.
 */
const formatPatchResults = (
  results: FilePatchResult[],
  dryRun: boolean,
): string => {
  const out: string[] = [];

  for (const fileResult of results) {
    const marker = fileResult.success ? "✓" : "✗";
    out.push(`${marker} ${fileResult.filePath}`);

    const { hunksApplied, hunksFailed } = fileResult;
    if (hunksApplied > 0 || hunksFailed > 0) {
      out.push(` ${hunksApplied} hunk(s) applied, ${hunksFailed} failed`);
    }

    // Call out hunks that only matched after a fuzzy offset.
    for (const hunkResult of fileResult.hunkResults) {
      if (hunkResult.fuzzyOffset) {
        out.push(` ${PATCH_MESSAGES.FUZZY_APPLIED(hunkResult.hunkIndex, hunkResult.fuzzyOffset)}`);
      }
    }

    if (fileResult.error) {
      out.push(` Error: ${fileResult.error}`);
    }
  }

  if (dryRun) {
    out.push("", "(dry run - no changes were made)");
  } else if (results.some((r) => r.success)) {
    out.push("", PATCH_MESSAGES.ROLLBACK_AVAILABLE);
  }

  return out.join("\n");
};
/**
 * Rollback a patched file
 *
 * Restores the pre-patch content recorded in rollbackStore and removes the
 * entry on success. Returns false when no snapshot exists or the restore
 * write/unlink fails.
 */
export const rollbackPatch = async (filePath: string): Promise<boolean> => {
  const rollback = rollbackStore.get(filePath);
  if (!rollback) {
    return false;
  }
  try {
    if (rollback.originalContent === "") {
      // Was a new file, delete it
      // NOTE(review): an existing-but-empty file that was patched also has
      // originalContent === "" and would be deleted here instead of being
      // restored to empty — confirm whether PatchRollback should carry an
      // explicit isNew flag.
      await fs.unlink(filePath);
    } else {
      await fs.writeFile(filePath, rollback.originalContent, "utf-8");
    }
    rollbackStore.delete(filePath);
    return true;
  } catch {
    return false;
  }
};
/**
 * List the file paths that currently have a stored rollback snapshot.
 */
export const getAvailableRollbacks = (): string[] => {
  return [...rollbackStore.keys()];
};
/**
 * Clear rollback history
 *
 * Drops every stored snapshot; files patched earlier in the session can no
 * longer be rolled back after this call.
 */
export const clearRollbacks = (): void => {
  rollbackStore.clear();
};

View File

@@ -0,0 +1,60 @@
/**
* Apply Patch Tool
*
* Applies unified diff patches to files with fuzzy matching support.
*/
import type { ToolDefinition } from "@tools/types";
import { applyPatchParams } from "@tools/apply-patch/params";
import { executeApplyPatch } from "@tools/apply-patch/execute";
export { applyPatchParams } from "@tools/apply-patch/params";
export { executeApplyPatch, rollbackPatch, getAvailableRollbacks, clearRollbacks } from "@tools/apply-patch/execute";
export { parsePatch, validatePatch, getTargetPath, reversePatch } from "@tools/apply-patch/parser";
export { findHunkPosition, isHunkApplied, previewHunkApplication } from "@tools/apply-patch/matcher";
/**
 * Tool description
 *
 * Model-facing usage text for the apply_patch tool: supported features,
 * parameters, and an example unified diff. This is a runtime string — any
 * edit here changes what the model sees.
 */
// NOTE(review): the "0-3" range and "default: 2" figures below are
// hard-coded prose — confirm they match PATCH_DEFAULTS.MAX_FUZZ / FUZZ.
const APPLY_PATCH_DESCRIPTION = `Apply a unified diff patch to one or more files.
Use this tool to:
- Apply changes from a diff/patch
- Update files based on patch content
- Preview changes before applying (dry run)
Parameters:
- patch: The unified diff content (required)
- targetFile: Override the target file path (optional)
- dryRun: Preview without applying changes (default: false)
- fuzz: Context line tolerance 0-3 (default: 2)
- reverse: Apply patch in reverse to undo changes (default: false)
The tool supports:
- Standard unified diff format (git diff, diff -u)
- Fuzzy context matching when lines have shifted
- Creating new files
- Deleting files
- Rollback on failure
Example patch format:
\`\`\`
--- a/src/example.ts
+++ b/src/example.ts
@@ -10,6 +10,7 @@ function example() {
const a = 1;
const b = 2;
+ const c = 3;
return a + b;
}
\`\`\``;
/**
 * Apply patch tool definition
 *
 * Wires the zod parameter schema and the executor into the shared
 * ToolDefinition shape consumed by the tool registry.
 */
export const applyPatchTool: ToolDefinition = {
  name: "apply_patch",
  description: APPLY_PATCH_DESCRIPTION,
  parameters: applyPatchParams,
  execute: executeApplyPatch,
};

View File

@@ -0,0 +1,304 @@
/**
* Fuzzy Matcher
*
* Finds patch context in target file with fuzzy matching support.
*/
import { PATCH_DEFAULTS } from "@constants/apply-patch";
import type {
PatchHunk,
FuzzyMatchResult,
ContextMatchOptions,
} from "@/types/apply-patch";
/**
 * Default match options
 *
 * Sourced from PATCH_DEFAULTS so the matcher stays in sync with the
 * tool-level defaults; callers may override any field per call.
 */
const DEFAULT_MATCH_OPTIONS: ContextMatchOptions = {
  fuzz: PATCH_DEFAULTS.FUZZ,
  ignoreWhitespace: PATCH_DEFAULTS.IGNORE_WHITESPACE,
  ignoreCase: PATCH_DEFAULTS.IGNORE_CASE,
};
/**
 * Canonicalize a line for comparison according to the active match options:
 * whitespace runs collapse to single spaces (ends trimmed) and case is
 * folded when requested.
 */
const normalizeLine = (
  line: string,
  options: ContextMatchOptions,
): string => {
  const collapsed = options.ignoreWhitespace
    ? line.replace(/\s+/g, " ").trim()
    : line;
  return options.ignoreCase ? collapsed.toLowerCase() : collapsed;
};
/**
 * Collect the hunk lines expected to exist in the pre-patch file: context
 * lines plus deletions (additions are not present yet).
 */
const extractOriginalLines = (hunk: PatchHunk): string[] => {
  const wanted: string[] = [];
  for (const line of hunk.lines) {
    if (line.type === "context" || line.type === "deletion") {
      wanted.push(line.content);
    }
  }
  return wanted;
};
/**
 * Check how well the hunk's original lines match the file at a position.
 *
 * Compares normalized lines one-to-one starting at startLine. A position
 * "matches" when at least (total - fuzz) lines agree, never fewer than one;
 * confidence is the fraction of lines that agreed.
 *
 * (Cleanup: totalLines is never reassigned, so it is now const and reused
 * for the window-bounds guard.)
 */
const checkMatchAtPosition = (
  fileLines: string[],
  originalLines: string[],
  startLine: number,
  options: ContextMatchOptions,
): { matches: boolean; confidence: number } => {
  const totalLines = originalLines.length;

  // Can't match if the comparison window runs past the end of the file.
  if (startLine + totalLines > fileLines.length) {
    return { matches: false, confidence: 0 };
  }

  let matchCount = 0;
  for (let i = 0; i < totalLines; i++) {
    const fileLine = normalizeLine(fileLines[startLine + i], options);
    const patchLine = normalizeLine(originalLines[i], options);
    if (fileLine === patchLine) {
      matchCount++;
    }
  }

  const confidence = totalLines > 0 ? matchCount / totalLines : 0;
  // Require at least (total - fuzz) lines to match
  const requiredMatches = Math.max(1, totalLines - options.fuzz);
  return { matches: matchCount >= requiredMatches, confidence };
};
/**
 * Find the best match position for a hunk in file content.
 *
 * Tries the hunk's declared position first, then scans outward (up to
 * fuzz * CONTEXT_LINES lines in each direction) for the highest-confidence
 * window of matching context/deletion lines. lineNumber is 0-indexed;
 * offset is the signed distance from the declared position.
 *
 * Fix: the final fallback now also accepts the declared position when it
 * matched within the fuzz tolerance (matches === true) — previously such a
 * position was rejected whenever its confidence was <= 0.5, even though an
 * equally-confident offset match would have been accepted.
 */
export const findHunkPosition = (
  fileContent: string,
  hunk: PatchHunk,
  options: Partial<ContextMatchOptions> = {},
): FuzzyMatchResult => {
  const fullOptions: ContextMatchOptions = {
    ...DEFAULT_MATCH_OPTIONS,
    ...options,
  };
  const fileLines = fileContent.split("\n");
  const originalLines = extractOriginalLines(hunk);

  // Pure-addition hunk: nothing to anchor on — trust the declared position,
  // clamped to the end of the file.
  if (originalLines.length === 0) {
    const targetLine = Math.min(hunk.oldStart - 1, fileLines.length);
    return {
      found: true,
      lineNumber: targetLine,
      offset: 0,
      confidence: 1,
    };
  }

  // Expected position (0-indexed)
  const expectedLine = hunk.oldStart - 1;

  // First, try the exact declared position.
  const exactMatch = checkMatchAtPosition(
    fileLines,
    originalLines,
    expectedLine,
    fullOptions,
  );
  if (exactMatch.matches && exactMatch.confidence === 1) {
    return {
      found: true,
      lineNumber: expectedLine,
      offset: 0,
      confidence: exactMatch.confidence,
    };
  }

  // Scan outward within the fuzz range for a better window.
  const maxOffset = fullOptions.fuzz * PATCH_DEFAULTS.CONTEXT_LINES;
  let bestMatch: FuzzyMatchResult | null = null;
  for (let offset = 1; offset <= maxOffset; offset++) {
    // Try before expected position
    const beforePos = expectedLine - offset;
    if (beforePos >= 0) {
      const beforeMatch = checkMatchAtPosition(
        fileLines,
        originalLines,
        beforePos,
        fullOptions,
      );
      if (beforeMatch.matches && (!bestMatch || beforeMatch.confidence > bestMatch.confidence)) {
        bestMatch = {
          found: true,
          lineNumber: beforePos,
          offset: -offset,
          confidence: beforeMatch.confidence,
        };
      }
    }
    // Try after expected position
    const afterPos = expectedLine + offset;
    if (afterPos < fileLines.length) {
      const afterMatch = checkMatchAtPosition(
        fileLines,
        originalLines,
        afterPos,
        fullOptions,
      );
      if (afterMatch.matches && (!bestMatch || afterMatch.confidence > bestMatch.confidence)) {
        bestMatch = {
          found: true,
          lineNumber: afterPos,
          offset: offset,
          confidence: afterMatch.confidence,
        };
      }
    }
    // Stop early once a perfect window has been found.
    if (bestMatch && bestMatch.confidence === 1) {
      break;
    }
  }

  if (bestMatch) {
    return bestMatch;
  }

  // Fall back to the declared position when it matched within tolerance or
  // had a better-than-even partial match.
  if (exactMatch.matches || exactMatch.confidence > 0.5) {
    return {
      found: true,
      lineNumber: expectedLine,
      offset: 0,
      confidence: exactMatch.confidence,
    };
  }

  return {
    found: false,
    lineNumber: -1,
    offset: 0,
    confidence: 0,
  };
};
/**
* Check if a hunk is already applied (deletions are gone, additions are present)
*/
export const isHunkApplied = (
fileContent: string,
hunk: PatchHunk,
options: Partial<ContextMatchOptions> = {},
): boolean => {
const fullOptions: ContextMatchOptions = {
...DEFAULT_MATCH_OPTIONS,
...options,
};
const fileLines = fileContent.split("\n");
// Check if additions are present and deletions are not
let additionsPresent = 0;
let deletionsAbsent = 0;
for (const line of hunk.lines) {
const normalizedContent = normalizeLine(line.content, fullOptions);
if (line.type === "addition") {
const found = fileLines.some(
(fl) => normalizeLine(fl, fullOptions) === normalizedContent,
);
if (found) additionsPresent++;
}
if (line.type === "deletion") {
const found = fileLines.some(
(fl) => normalizeLine(fl, fullOptions) === normalizedContent,
);
if (!found) deletionsAbsent++;
}
}
const totalAdditions = hunk.lines.filter((l) => l.type === "addition").length;
const totalDeletions = hunk.lines.filter((l) => l.type === "deletion").length;
// Consider applied if most additions are present and most deletions are absent
const additionsMatch =
totalAdditions === 0 || additionsPresent >= totalAdditions * 0.8;
const deletionsMatch =
totalDeletions === 0 || deletionsAbsent >= totalDeletions * 0.8;
return additionsMatch && deletionsMatch;
};
/**
 * Compute the file content that would result from applying a hunk at the
 * given 0-indexed position, without touching the file.
 *
 * Context and addition lines are emitted in order; context and deletion
 * lines consume lines from the original file.
 */
export const previewHunkApplication = (
  fileContent: string,
  hunk: PatchHunk,
  position: number,
): { success: boolean; preview: string[]; error?: string } => {
  const fileLines = fileContent.split("\n");

  // How many original-file lines the hunk replaces.
  const consumed = hunk.lines.filter(
    (line) => line.type === "context" || line.type === "deletion",
  ).length;

  // The lines the hunk itself produces.
  const emitted = hunk.lines
    .filter((line) => line.type === "context" || line.type === "addition")
    .map((line) => line.content);

  const preview = [
    ...fileLines.slice(0, position),
    ...emitted,
    ...fileLines.slice(position + consumed),
  ];

  return { success: true, preview };
};

View File

@@ -0,0 +1,43 @@
/**
* Apply Patch Tool Parameters
*/
import { z } from "zod";
import { PATCH_DEFAULTS } from "@constants/apply-patch";
/**
 * Zod schema for apply_patch tool parameters
 *
 * Defaults for dryRun/fuzz/reverse are applied by the schema, so tool-call
 * payloads may omit them; fuzz bounds come from PATCH_DEFAULTS.
 */
export const applyPatchParams = z.object({
  patch: z
    .string()
    .describe("The unified diff patch content to apply"),
  // When set, every file section in the patch targets this path instead of
  // the path in its own header.
  targetFile: z
    .string()
    .optional()
    .describe("Override the target file path from the patch header"),
  dryRun: z
    .boolean()
    .optional()
    .default(false)
    .describe("Validate and preview changes without actually applying them"),
  // Number of context lines allowed to mismatch during fuzzy matching.
  fuzz: z
    .number()
    .int()
    .min(0)
    .max(PATCH_DEFAULTS.MAX_FUZZ)
    .optional()
    .default(PATCH_DEFAULTS.FUZZ)
    .describe(`Context line tolerance for fuzzy matching (0-${PATCH_DEFAULTS.MAX_FUZZ})`),
  reverse: z
    .boolean()
    .optional()
    .default(false)
    .describe("Apply the patch in reverse (undo the changes)"),
});

// Parameter type inferred from the schema above (defaults already applied).
export type ApplyPatchParams = z.infer<typeof applyPatchParams>;

View File

@@ -0,0 +1,387 @@
/**
* Patch Parser
*
* Parses unified diff format patches into structured data.
*/
import {
PATCH_PATTERNS,
LINE_PREFIXES,
SPECIAL_PATHS,
PATCH_ERRORS,
} from "@constants/apply-patch";
import type {
ParsedPatch,
ParsedFilePatch,
PatchHunk,
PatchLine,
PatchLineType,
PatchValidationResult,
} from "@/types/apply-patch";
/**
 * Parse a unified diff patch string
 *
 * Single pass over the lines: git/file headers open (and flush) file
 * sections, metadata lines update the open section, hunk headers open
 * (and flush) hunks, and all remaining lines feed the open hunk.
 *
 * Fixes:
 * - An open hunk is now flushed into its file when a new "diff --git"
 *   header begins. Previously the last hunk of every non-final file in a
 *   multi-file git patch was lost, and a single-hunk file was dropped
 *   entirely (the flush condition saw an empty hunks array).
 * - A "---" header that follows already-collected hunks now starts a new
 *   file section, so plain multi-file `diff -u` output no longer merges
 *   every file into the first section.
 *
 * NOTE(review): header patterns are tested before hunk body lines, so a
 * deletion line whose content begins with "--" (rendering as "---…") can
 * be misread as a file header — confirm PATCH_PATTERNS accounts for this.
 * File sections with no hunks and no binary marker (e.g. pure renames)
 * are still dropped, as before.
 */
export const parsePatch = (patchContent: string): ParsedPatch => {
  const lines = patchContent.split("\n");
  const files: ParsedFilePatch[] = [];
  let currentFile: ParsedFilePatch | null = null;
  let currentHunk: PatchHunk | null = null;
  let lineIndex = 0;

  // Flush the open hunk (if any) into the open file.
  const flushHunk = (): void => {
    if (currentHunk && currentFile) {
      currentFile.hunks.push(currentHunk);
    }
    currentHunk = null;
  };

  // Flush the open file (if it holds anything applyable) into the result.
  const flushFile = (): void => {
    flushHunk();
    if (currentFile && (currentFile.hunks.length > 0 || currentFile.isBinary)) {
      files.push(currentFile);
    }
    currentFile = null;
  };

  while (lineIndex < lines.length) {
    const line = lines[lineIndex];
    // Git diff header — begins a new file section.
    const gitDiffMatch = line.match(PATCH_PATTERNS.GIT_DIFF);
    if (gitDiffMatch) {
      flushFile();
      currentFile = createEmptyFilePatch(gitDiffMatch[1], gitDiffMatch[2]);
      lineIndex++;
      continue;
    }
    // File header old ("--- path"). After hunks have been collected this
    // marks a new file section (plain multi-file diff -u output).
    const oldHeaderMatch = line.match(PATCH_PATTERNS.FILE_HEADER_OLD);
    if (oldHeaderMatch) {
      if (currentFile && (currentHunk !== null || currentFile.hunks.length > 0)) {
        flushFile();
      }
      if (!currentFile) {
        currentFile = createEmptyFilePatch("", "");
      }
      currentFile.oldPath = cleanPath(oldHeaderMatch[1]);
      if (currentFile.oldPath === SPECIAL_PATHS.DEV_NULL) {
        currentFile.isNew = true;
      }
      lineIndex++;
      continue;
    }
    // File header new ("+++ path").
    const newHeaderMatch = line.match(PATCH_PATTERNS.FILE_HEADER_NEW);
    if (newHeaderMatch) {
      if (!currentFile) {
        currentFile = createEmptyFilePatch("", "");
      }
      currentFile.newPath = cleanPath(newHeaderMatch[1]);
      if (currentFile.newPath === SPECIAL_PATHS.DEV_NULL) {
        currentFile.isDeleted = true;
      }
      lineIndex++;
      continue;
    }
    // Index line (metadata, skip)
    if (PATCH_PATTERNS.INDEX_LINE.test(line)) {
      lineIndex++;
      continue;
    }
    // Binary file marker
    if (PATCH_PATTERNS.BINARY_FILE.test(line)) {
      if (currentFile) {
        currentFile.isBinary = true;
      }
      lineIndex++;
      continue;
    }
    // New file mode
    if (PATCH_PATTERNS.NEW_FILE.test(line)) {
      if (currentFile) {
        currentFile.isNew = true;
      }
      lineIndex++;
      continue;
    }
    // Deleted file mode
    if (PATCH_PATTERNS.DELETED_FILE.test(line)) {
      if (currentFile) {
        currentFile.isDeleted = true;
      }
      lineIndex++;
      continue;
    }
    // Rename from
    const renameFromMatch = line.match(PATCH_PATTERNS.RENAME_FROM);
    if (renameFromMatch) {
      if (currentFile) {
        currentFile.isRenamed = true;
        currentFile.oldPath = cleanPath(renameFromMatch[1]);
      }
      lineIndex++;
      continue;
    }
    // Rename to
    const renameToMatch = line.match(PATCH_PATTERNS.RENAME_TO);
    if (renameToMatch) {
      if (currentFile) {
        currentFile.newPath = cleanPath(renameToMatch[1]);
      }
      lineIndex++;
      continue;
    }
    // Hunk header — flush the previous hunk, open a new one.
    const hunkMatch = line.match(PATCH_PATTERNS.HUNK_HEADER);
    if (hunkMatch) {
      flushHunk();
      currentHunk = {
        oldStart: parseInt(hunkMatch[1], 10),
        oldLines: hunkMatch[2] ? parseInt(hunkMatch[2], 10) : 1,
        newStart: parseInt(hunkMatch[3], 10),
        newLines: hunkMatch[4] ? parseInt(hunkMatch[4], 10) : 1,
        lines: [],
        header: line,
      };
      lineIndex++;
      continue;
    }
    // Patch body lines (context, addition, deletion)
    if (currentHunk) {
      const patchLine = parsePatchLine(line, currentHunk);
      if (patchLine) {
        currentHunk.lines.push(patchLine);
      }
    }
    lineIndex++;
  }
  // Flush whatever is still open at end of input.
  flushFile();
  return {
    files,
    rawPatch: patchContent,
  };
};
/**
 * Build a fresh ParsedFilePatch with cleaned paths, no hunks, and every
 * flag reset.
 */
const createEmptyFilePatch = (oldPath: string, newPath: string): ParsedFilePatch => {
  return {
    oldPath: cleanPath(oldPath),
    newPath: cleanPath(newPath),
    hunks: [],
    isBinary: false,
    isNew: false,
    isDeleted: false,
    isRenamed: false,
  };
};
/**
 * Strip a leading git "a/" or "b/" prefix from a diff path.
 *
 * Fix: slice by the prefix constants' lengths rather than a hard-coded 2,
 * so the strip stays correct if SPECIAL_PATHS prefixes ever change.
 */
const cleanPath = (path: string): string => {
  if (path.startsWith(SPECIAL_PATHS.A_PREFIX)) {
    return path.slice(SPECIAL_PATHS.A_PREFIX.length);
  }
  if (path.startsWith(SPECIAL_PATHS.B_PREFIX)) {
    return path.slice(SPECIAL_PATHS.B_PREFIX.length);
  }
  return path;
};
/**
 * Parse a single patch line
 *
 * Maps the leading character (via LINE_PREFIXES) to a context/addition/
 * deletion PatchLine. Returns null for "\ No newline at end of file"
 * markers and for empty lines. Lines with an unrecognized prefix are kept
 * whole as context.
 *
 * NOTE(review): truly empty lines are dropped even mid-hunk, but some diff
 * generators emit an empty line (instead of a single space) for blank
 * context lines — dropping those would corrupt such hunks. Confirm inputs
 * always use the space-prefixed form.
 */
const parsePatchLine = (line: string, _hunk: PatchHunk): PatchLine | null => {
  // No newline marker (skip but keep in mind)
  if (PATCH_PATTERNS.NO_NEWLINE.test(line)) {
    return null;
  }
  // Empty line at end of patch
  if (line === "") {
    return null;
  }
  const prefix = line[0];
  const content = line.slice(1);
  const typeMap: Record<string, PatchLineType> = {
    [LINE_PREFIXES.CONTEXT]: "context",
    [LINE_PREFIXES.ADDITION]: "addition",
    [LINE_PREFIXES.DELETION]: "deletion",
  };
  const type = typeMap[prefix];
  if (!type) {
    // Unknown line type, treat as context if it looks like content
    return {
      type: "context",
      content: line,
    };
  }
  return {
    type,
    content,
  };
};
/**
 * Validate a parsed patch
 *
 * Hard errors: an empty patch, or a non-deleted file with no target path.
 * Warnings: binary files (unsupported) and hunks whose declared header
 * counts disagree with the lines actually present.
 */
export const validatePatch = (patch: ParsedPatch): PatchValidationResult => {
  const errors: string[] = [];
  const warnings: string[] = [];
  let hunkCount = 0;

  if (patch.files.length === 0) {
    errors.push(PATCH_ERRORS.INVALID_PATCH);
    return { valid: false, errors, warnings, fileCount: 0, hunkCount: 0 };
  }

  for (const file of patch.files) {
    // Binary patches can't be applied; warn and skip further checks.
    if (file.isBinary) {
      warnings.push(PATCH_ERRORS.BINARY_NOT_SUPPORTED);
      continue;
    }

    if (!file.newPath && !file.isDeleted) {
      errors.push("Missing target path for file");
    }

    for (const hunk of file.hunks) {
      hunkCount++;

      const contextCount = hunk.lines.filter((l) => l.type === "context").length;
      const additionCount = hunk.lines.filter((l) => l.type === "addition").length;
      const deletionCount = hunk.lines.filter((l) => l.type === "deletion").length;

      // Declared header counts should match the lines actually present.
      const expectedOld = contextCount + deletionCount;
      const expectedNew = contextCount + additionCount;
      if (expectedOld !== hunk.oldLines) {
        warnings.push(
          `Hunk line count mismatch: expected ${hunk.oldLines} old lines, found ${expectedOld}`,
        );
      }
      if (expectedNew !== hunk.newLines) {
        warnings.push(
          `Hunk line count mismatch: expected ${hunk.newLines} new lines, found ${expectedNew}`,
        );
      }
    }
  }

  return {
    valid: errors.length === 0,
    errors,
    warnings,
    fileCount: patch.files.length,
    hunkCount,
  };
};
/**
 * Resolve which on-disk path a file patch targets: new files use newPath,
 * deletions use oldPath, and everything else (renames, regular patches)
 * prefers the post-patch path with oldPath as fallback.
 */
export const getTargetPath = (filePatch: ParsedFilePatch): string => {
  if (filePatch.isNew) return filePatch.newPath;
  if (filePatch.isDeleted) return filePatch.oldPath;
  return filePatch.newPath || filePatch.oldPath;
};
/**
 * Heuristic: does this patch look like it was already applied, so applying
 * it forward would undo work? True when the patch's added lines appear in
 * the file more than twice as often as its deleted lines.
 */
export const isPatchReversed = (
  patch: ParsedFilePatch,
  fileContent: string,
): boolean => {
  const fileLines = new Set(fileContent.split("\n"));

  let addedPresent = 0;
  let deletedPresent = 0;
  for (const line of patch.hunks.flatMap((hunk) => hunk.lines)) {
    if (!fileLines.has(line.content)) continue;
    if (line.type === "addition") {
      addedPresent++;
    } else if (line.type === "deletion") {
      deletedPresent++;
    }
  }

  return addedPresent > deletedPresent * 2;
};
/**
* Reverse a patch (swap additions and deletions)
*/
export const reversePatch = (patch: ParsedFilePatch): ParsedFilePatch => {
return {
...patch,
oldPath: patch.newPath,
newPath: patch.oldPath,
isNew: patch.isDeleted,
isDeleted: patch.isNew,
hunks: patch.hunks.map((hunk) => ({
...hunk,
oldStart: hunk.newStart,
oldLines: hunk.newLines,
newStart: hunk.oldStart,
newLines: hunk.oldLines,
lines: hunk.lines.map((line) => ({
...line,
type:
line.type === "addition"
? "deletion"
: line.type === "deletion"
? "addition"
: line.type,
})),
})),
};
};

View File

@@ -12,7 +12,10 @@ export { todoReadTool } from "@tools/todo-read";
export { globToolDefinition } from "@tools/glob/definition";
export { grepToolDefinition } from "@tools/grep/definition";
export { webSearchTool } from "@tools/web-search";
export { webFetchTool } from "@tools/web-fetch";
export { multiEditTool } from "@tools/multi-edit";
export { lspTool } from "@tools/lsp";
export { applyPatchTool } from "@tools/apply-patch";
import type { ToolDefinition, FunctionDefinition } from "@tools/types";
import { toolToFunction } from "@tools/types";
@@ -25,7 +28,10 @@ import { todoReadTool } from "@tools/todo-read";
import { globToolDefinition } from "@tools/glob/definition";
import { grepToolDefinition } from "@tools/grep/definition";
import { webSearchTool } from "@tools/web-search";
import { webFetchTool } from "@tools/web-fetch";
import { multiEditTool } from "@tools/multi-edit";
import { lspTool } from "@tools/lsp";
import { applyPatchTool } from "@tools/apply-patch";
import {
isMCPTool,
executeMCPTool,
@@ -44,12 +50,15 @@ export const tools: ToolDefinition[] = [
readTool,
writeTool,
editTool,
multiEditTool,
globToolDefinition,
grepToolDefinition,
todoWriteTool,
todoReadTool,
webSearchTool,
webFetchTool,
lspTool,
applyPatchTool,
];
// Tools that are read-only (allowed in chat mode)
@@ -59,6 +68,7 @@ const READ_ONLY_TOOLS = new Set([
"grep",
"todo_read",
"web_search",
"web_fetch",
"lsp",
]);

View File

@@ -0,0 +1,343 @@
/**
* MultiEdit Tool Execution
*
* Performs batch file editing with atomic transactions
*/
import fs from "fs/promises";
import path from "path";
import {
MULTI_EDIT_DEFAULTS,
MULTI_EDIT_MESSAGES,
MULTI_EDIT_TITLES,
MULTI_EDIT_DESCRIPTION,
} from "@constants/multi-edit";
import { isFileOpAllowed, promptFilePermission } from "@services/permissions";
import { formatDiff, generateDiff } from "@utils/diff";
import { multiEditParams } from "@tools/multi-edit/params";
import type { ToolDefinition, ToolContext, ToolResult } from "@/types/tools";
import type { EditItem, MultiEditParams } from "@tools/multi-edit/params";
// Snapshot of a file taken before editing, used by rollback() on failure.
interface FileBackup {
  path: string;
  content: string;
}

// Outcome of pre-flight validation for a single edit; fileContent holds the
// file's current text when valid, so the apply phase can reuse the read.
interface EditValidation {
  valid: boolean;
  error?: string;
  fileContent?: string;
}

// Per-edit outcome: rendered diff plus line-change counts on success, or an
// error message on failure.
interface EditResult {
  path: string;
  success: boolean;
  diff?: string;
  additions?: number;
  deletions?: number;
  error?: string;
}
// Build a failed ToolResult carrying the standard multi-edit failure title.
const createErrorResult = (error: string): ToolResult => {
  return {
    success: false,
    title: MULTI_EDIT_TITLES.FAILED,
    output: "",
    error,
  };
};
/**
 * Assemble the final ToolResult from individual edit outcomes: concatenated
 * per-file diffs for successful edits, a "Failed Edits" section when any
 * failed, and aggregate counts in metadata.
 */
const createSuccessResult = (
  results: EditResult[],
  totalEdits: number,
): ToolResult => {
  const successful: EditResult[] = [];
  const failed: EditResult[] = [];
  for (const result of results) {
    (result.success ? successful : failed).push(result);
  }

  let output = successful
    .map((r) => `## ${path.basename(r.path)}\n\n${r.diff}`)
    .join("\n\n---\n\n");

  let totalAdditions = 0;
  let totalDeletions = 0;
  for (const r of successful) {
    totalAdditions += r.additions ?? 0;
    totalDeletions += r.deletions ?? 0;
  }

  if (failed.length > 0) {
    output +=
      "\n\n## Failed Edits\n\n" +
      failed.map((r) => `- ${r.path}: ${r.error}`).join("\n");
  }

  const title =
    failed.length > 0
      ? MULTI_EDIT_TITLES.PARTIAL(successful.length, failed.length)
      : MULTI_EDIT_TITLES.SUCCESS(successful.length);

  return {
    success: failed.length === 0,
    title,
    output,
    metadata: {
      totalEdits,
      successful: successful.length,
      failed: failed.length,
      totalAdditions,
      totalDeletions,
    },
  };
};
/**
 * Pre-flight check for one edit: the target must be an existing, readable,
 * size-limited regular file whose content contains old_string exactly
 * once. Returns the file content on success so the apply phase can reuse
 * the read.
 */
const validateEdit = async (
  edit: EditItem,
  workingDir: string,
): Promise<EditValidation> => {
  const fullPath = path.isAbsolute(edit.file_path)
    ? edit.file_path
    : path.join(workingDir, edit.file_path);

  try {
    const stat = await fs.stat(fullPath);
    if (!stat.isFile()) {
      return { valid: false, error: `Not a file: ${edit.file_path}` };
    }
    if (stat.size > MULTI_EDIT_DEFAULTS.MAX_FILE_SIZE) {
      return {
        valid: false,
        error: MULTI_EDIT_MESSAGES.FILE_TOO_LARGE(edit.file_path),
      };
    }

    const content = await fs.readFile(fullPath, "utf-8");

    // old_string must appear at least once...
    const occurrences = content.split(edit.old_string).length - 1;
    if (occurrences === 0) {
      const preview = edit.old_string.slice(0, 50);
      return {
        valid: false,
        error: MULTI_EDIT_MESSAGES.OLD_STRING_NOT_FOUND(edit.file_path, preview),
      };
    }
    // ...and exactly once, so the replacement is unambiguous.
    if (occurrences > 1) {
      return {
        valid: false,
        error: MULTI_EDIT_MESSAGES.OLD_STRING_NOT_UNIQUE(edit.file_path, occurrences),
      };
    }

    return { valid: true, fileContent: content };
  } catch (error) {
    if ((error as NodeJS.ErrnoException).code === "ENOENT") {
      return {
        valid: false,
        error: MULTI_EDIT_MESSAGES.FILE_NOT_FOUND(edit.file_path),
      };
    }
    return {
      valid: false,
      error: error instanceof Error ? error.message : String(error),
    };
  }
};
/**
 * Ensure the Edit operation is permitted for every target file.
 *
 * Paths not already allowed trigger an interactive prompt unless
 * autoApprove is set; prompts run sequentially so the user answers one at
 * a time. Returns the list of denied paths.
 */
const checkPermissions = async (
  edits: EditItem[],
  workingDir: string,
  autoApprove: boolean,
): Promise<{ allowed: boolean; denied: string[] }> => {
  const denied: string[] = [];

  for (const edit of edits) {
    const fullPath = path.isAbsolute(edit.file_path)
      ? edit.file_path
      : path.join(workingDir, edit.file_path);

    // Skip the prompt when auto-approving or the path is already allowed.
    if (autoApprove || isFileOpAllowed("Edit", fullPath)) {
      continue;
    }

    const { allowed } = await promptFilePermission(
      "Edit",
      fullPath,
      `Edit file: ${edit.file_path}`,
    );
    if (!allowed) {
      denied.push(edit.file_path);
    }
  }

  return { allowed: denied.length === 0, denied };
};
/**
 * Apply a single validated edit and write the result to disk.
 *
 * Replaces the (unique, pre-validated) old_string with new_string and
 * returns the rendered diff plus addition/deletion counts, or an error
 * result if the write fails.
 */
const applyEdit = async (
  edit: EditItem,
  workingDir: string,
  fileContent: string,
): Promise<EditResult> => {
  const fullPath = path.isAbsolute(edit.file_path)
    ? edit.file_path
    : path.join(workingDir, edit.file_path);
  try {
    // Fix: use a replacer function so `$` sequences in new_string (e.g.
    // "$&", "$1", "$'") are inserted literally instead of being expanded
    // as String.prototype.replace special replacement patterns.
    const newContent = fileContent.replace(edit.old_string, () => edit.new_string);
    const diff = generateDiff(fileContent, newContent);
    const relativePath = path.relative(workingDir, fullPath);
    const diffOutput = formatDiff(diff, relativePath);
    await fs.writeFile(fullPath, newContent, "utf-8");
    return {
      path: edit.file_path,
      success: true,
      diff: diffOutput,
      additions: diff.additions,
      deletions: diff.deletions,
    };
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    return {
      path: edit.file_path,
      success: false,
      error: message,
    };
  }
};
/**
 * Restore every backed-up file to its saved content.
 *
 * Restores sequentially in the order the backups were taken; individual write
 * failures are deliberately swallowed — rollback is best-effort and must never
 * throw while the tool is already handling an error.
 */
const rollback = async (backups: FileBackup[]): Promise<void> => {
  for (const { path: targetPath, content } of backups) {
    try {
      await fs.writeFile(targetPath, content, "utf-8");
    } catch {
      // Best-effort: keep restoring the remaining files.
    }
  }
};
/**
 * Execute a batch of edits as an atomic transaction.
 *
 * Pipeline: validate every edit → check file permissions → apply edits in
 * order, backing up each file's original content before its first edit → on
 * any failure, roll every touched file back to its pre-transaction state.
 *
 * @param args - Validated tool arguments (the edit list).
 * @param ctx - Tool context providing workingDir, autoApprove, and metadata
 *   callbacks.
 * @returns Success with per-edit results, or an error result (validation,
 *   permission, or atomic-failure after rollback).
 */
export const executeMultiEdit = async (
  args: MultiEditParams,
  ctx: ToolContext,
): Promise<ToolResult> => {
  const { edits } = args;
  // Validate edit count
  if (edits.length === 0) {
    return createErrorResult(MULTI_EDIT_MESSAGES.NO_EDITS);
  }
  if (edits.length > MULTI_EDIT_DEFAULTS.MAX_EDITS) {
    return createErrorResult(
      MULTI_EDIT_MESSAGES.TOO_MANY_EDITS(MULTI_EDIT_DEFAULTS.MAX_EDITS),
    );
  }
  ctx.onMetadata?.({
    title: MULTI_EDIT_TITLES.VALIDATING(edits.length),
    status: "running",
  });
  // Phase 1: Validate all edits against the files' current on-disk content.
  // NOTE(review): edits targeting the same file are all validated against the
  // ORIGINAL content here; an old_string that only exists after an earlier
  // edit in the batch ran will fail validation — confirm this is intended.
  const validations = new Map<string, { validation: EditValidation; edit: EditItem }>();
  const errors: string[] = [];
  for (const edit of edits) {
    const validation = await validateEdit(edit, ctx.workingDir);
    validations.set(edit.file_path, { validation, edit });
    if (!validation.valid) {
      errors.push(validation.error ?? "Unknown error");
    }
  }
  if (errors.length > 0) {
    return createErrorResult(
      MULTI_EDIT_MESSAGES.VALIDATION_FAILED + ":\n" + errors.join("\n"),
    );
  }
  // Phase 2: Check permissions for every target file up front.
  const permCheck = await checkPermissions(
    edits,
    ctx.workingDir,
    ctx.autoApprove ?? false,
  );
  if (!permCheck.allowed) {
    return createErrorResult(
      `Permission denied for: ${permCheck.denied.join(", ")}`,
    );
  }
  // Phase 3: Back up originals and apply edits in order.
  const backups: FileBackup[] = [];
  // Only the FIRST backup per file is taken: it holds the pre-transaction
  // content. (Previously every edit pushed a backup, so rolling back after a
  // second edit to the same file restored an intermediate state — not the
  // original — because later backups overwrote earlier ones.)
  const backedUp = new Set<string>();
  const results: EditResult[] = [];
  let failed = false;
  for (let i = 0; i < edits.length; i++) {
    const edit = edits[i];
    const data = validations.get(edit.file_path);
    if (!data?.validation.fileContent) continue;
    ctx.onMetadata?.({
      title: MULTI_EDIT_TITLES.APPLYING(i + 1, edits.length),
      status: "running",
    });
    const fullPath = path.isAbsolute(edit.file_path)
      ? edit.file_path
      : path.join(ctx.workingDir, edit.file_path);
    // Create backup of the original content (once per file)
    if (!backedUp.has(fullPath)) {
      backedUp.add(fullPath);
      backups.push({
        path: fullPath,
        content: data.validation.fileContent,
      });
    }
    // Apply edit
    const result = await applyEdit(edit, ctx.workingDir, data.validation.fileContent);
    results.push(result);
    if (!result.success) {
      failed = true;
      break;
    }
    // Keep the cached content current so a later edit to the same file works
    // on the post-edit text. The replacer callback inserts new_string
    // verbatim — "$&"-style replacement patterns are not expanded.
    const newContent = data.validation.fileContent.replace(
      edit.old_string,
      () => edit.new_string,
    );
    validations.set(edit.file_path, {
      ...data,
      validation: { ...data.validation, fileContent: newContent },
    });
  }
  // Phase 4: Roll everything back if any edit failed (atomicity guarantee).
  if (failed) {
    ctx.onMetadata?.({
      title: MULTI_EDIT_TITLES.ROLLBACK,
      status: "running",
    });
    await rollback(backups);
    return createErrorResult(MULTI_EDIT_MESSAGES.ATOMIC_FAILURE);
  }
  return createSuccessResult(results, edits.length);
};
/** Tool registration: wires the multi_edit schema to its atomic executor. */
export const multiEditTool: ToolDefinition<typeof multiEditParams> = {
  name: "multi_edit",
  description: MULTI_EDIT_DESCRIPTION,
  parameters: multiEditParams,
  execute: executeMultiEdit,
};

View File

@@ -0,0 +1,13 @@
/**
* MultiEdit Tool
*
* Batch file editing with atomic transactions
*/
export { multiEditTool, executeMultiEdit } from "@tools/multi-edit/execute";
export {
multiEditParams,
editItemSchema,
type EditItem,
type MultiEditParams,
} from "@tools/multi-edit/params";

View File

@@ -0,0 +1,21 @@
/**
* MultiEdit Tool Parameters
*/
import { z } from "zod";
// One find-and-replace operation. Execution requires old_string to occur
// exactly once in the target file (validated before any edit is applied).
export const editItemSchema = z.object({
  file_path: z.string().describe("Absolute path to the file to edit"),
  old_string: z.string().describe("The exact text to find and replace"),
  new_string: z.string().describe("The replacement text"),
});
// multi_edit arguments: a non-empty batch applied all-or-nothing.
// (An upper bound on batch size is enforced at execution time, not here.)
export const multiEditParams = z.object({
  edits: z
    .array(editItemSchema)
    .min(1)
    .describe("Array of edits to apply atomically"),
});
// Inferred TypeScript types for validated tool arguments.
export type EditItem = z.infer<typeof editItemSchema>;
export type MultiEditParams = z.infer<typeof multiEditParams>;

View File

@@ -0,0 +1,346 @@
/**
* WebFetch Tool Execution
*
* Fetches content from URLs and converts HTML to markdown
*/
import {
WEB_FETCH_DEFAULTS,
WEB_FETCH_MESSAGES,
WEB_FETCH_TITLES,
WEB_FETCH_DESCRIPTION,
HTML_REMOVE_ELEMENTS,
} from "@constants/web-fetch";
import { webFetchParams } from "@tools/web-fetch/params";
import type { ToolDefinition, ToolContext, ToolResult } from "@/types/tools";
import type { WebFetchParams } from "@tools/web-fetch/params";
/** Build a failed ToolResult carrying the standard web-fetch failure title. */
const createErrorResult = (error: string): ToolResult => {
  return {
    success: false,
    title: WEB_FETCH_TITLES.FAILED,
    output: "",
    error,
  };
};
/**
 * Build a successful ToolResult whose metadata records the final URL, the
 * content type, and the length of the (already processed) content.
 */
const createSuccessResult = (
  url: string,
  content: string,
  contentType: string,
): ToolResult => {
  const metadata = {
    url,
    contentType,
    contentLength: content.length,
  };
  return {
    success: true,
    title: WEB_FETCH_TITLES.SUCCESS,
    output: content,
    metadata,
  };
};
/**
 * Parse and validate a URL string for fetching.
 *
 * Plain HTTP is transparently upgraded to HTTPS before the check, which means
 * only `https:` URLs can ever be returned — the previous allow-list entry for
 * `http:` was unreachable dead code and has been removed. Any other scheme
 * (ftp:, file:, mailto:, ...) and any unparseable string yields null.
 *
 * @returns The (possibly upgraded) URL object, or null when invalid.
 */
const validateUrl = (url: string): URL | null => {
  try {
    const parsed = new URL(url);
    // Upgrade HTTP to HTTPS
    if (parsed.protocol === "http:") {
      parsed.protocol = "https:";
    }
    // After the upgrade only https: is acceptable.
    if (parsed.protocol !== "https:") {
      return null;
    }
    return parsed;
  } catch {
    return null;
  }
};
/**
 * Strip whole HTML elements (self-closing form, and opening tag through
 * matching closing tag) for each listed tag name.
 *
 * The `(?=[\s/>])` lookahead anchors the end of the tag name, so removing
 * "script" no longer also swallows unrelated tags that merely start with the
 * same letters (e.g. `<scripted>` or `<scriptx ...>`). Matching is
 * non-greedy and does not handle nested same-name elements.
 */
const removeElements = (html: string, tags: string[]): string => {
  let result = html;
  for (const tag of tags) {
    // Remove self-closing and regular tags
    const selfClosingPattern = new RegExp(`<${tag}(?=[\\s/>])[^>]*/>`, "gi");
    const openClosePattern = new RegExp(
      `<${tag}(?=[\\s/>])[^>]*>[\\s\\S]*?</${tag}>`,
      "gi",
    );
    result = result.replace(selfClosingPattern, "");
    result = result.replace(openClosePattern, "");
  }
  return result;
};
/**
 * Decode common named, decimal, and hexadecimal HTML entities.
 *
 * All entities are decoded in a single regex pass, fixing a double-decode bug
 * in the previous sequential approach: "&amp;lt;" first became "&lt;" and was
 * then decoded again to "<", so encoded text in the source could not survive.
 * Also restores the "&ndash;" mapping, which had degraded to an empty string
 * (the en dash character was evidently lost from the source). Unknown named
 * entities are left untouched.
 */
const decodeHtmlEntities = (text: string): string => {
  const entities: Record<string, string> = {
    "&amp;": "&",
    "&lt;": "<",
    "&gt;": ">",
    "&quot;": '"',
    "&nbsp;": " ",
    "&mdash;": "—",
    "&ndash;": "–", // was "" — mapping restored to the en dash
    "&hellip;": "…",
    "&rsquo;": "'",
    "&lsquo;": "'",
    "&rdquo;": '"',
    "&ldquo;": '"',
    "&copy;": "©",
    "&reg;": "®",
    "&trade;": "™",
  };
  // One pass over hex (&#x27;), decimal (&#39;) and named (&amp;) entities so
  // the output of one substitution is never re-scanned. Numeric forms such as
  // &#39; / &#x27; / &#x2F; are handled by the numeric branches and no longer
  // need map entries.
  return text.replace(
    /&(?:#x([0-9a-fA-F]+)|#(\d+)|([a-zA-Z]+));/g,
    (match, hex: string | undefined, dec: string | undefined, name: string | undefined) => {
      if (hex) {
        return String.fromCharCode(parseInt(hex, 16));
      }
      if (dec) {
        return String.fromCharCode(parseInt(dec, 10));
      }
      return entities[`&${name};`] ?? match;
    },
  );
};
/**
 * Convert an HTML document or fragment to plain-text markdown via a fixed
 * sequence of regex passes.
 *
 * The pass ORDER is load-bearing: structural tags are rewritten before the
 * catch-all tag-stripping pass, and entity decoding runs last so entities are
 * decoded exactly once on the final text. Reorder with care.
 *
 * Limitations (by construction of the regexes): attributes must use double
 * quotes to be captured (links/images); nested lists lose their nesting; no
 * attempt is made to handle malformed HTML.
 */
const htmlToMarkdown = (html: string): string => {
  // Remove unwanted elements (tag list supplied by HTML_REMOVE_ELEMENTS)
  let content = removeElements(html, HTML_REMOVE_ELEMENTS);
  // Extract body content if present (drops <head> metadata entirely)
  const bodyMatch = content.match(/<body[^>]*>([\s\S]*)<\/body>/i);
  if (bodyMatch) {
    content = bodyMatch[1];
  }
  // Convert headers h1..h6 to the corresponding markdown heading levels
  content = content.replace(/<h1[^>]*>([\s\S]*?)<\/h1>/gi, "\n# $1\n");
  content = content.replace(/<h2[^>]*>([\s\S]*?)<\/h2>/gi, "\n## $1\n");
  content = content.replace(/<h3[^>]*>([\s\S]*?)<\/h3>/gi, "\n### $1\n");
  content = content.replace(/<h4[^>]*>([\s\S]*?)<\/h4>/gi, "\n#### $1\n");
  content = content.replace(/<h5[^>]*>([\s\S]*?)<\/h5>/gi, "\n##### $1\n");
  content = content.replace(/<h6[^>]*>([\s\S]*?)<\/h6>/gi, "\n###### $1\n");
  // Convert links (href must be double-quoted to be captured)
  content = content.replace(
    /<a[^>]*href="([^"]*)"[^>]*>([\s\S]*?)<\/a>/gi,
    "[$2]($1)",
  );
  // Convert images: first the src+alt form, then a fallback for src-only
  // (the alt variant only matches when alt appears AFTER src)
  content = content.replace(
    /<img[^>]*src="([^"]*)"[^>]*alt="([^"]*)"[^>]*\/?>/gi,
    "![$2]($1)",
  );
  content = content.replace(/<img[^>]*src="([^"]*)"[^>]*\/?>/gi, "![]($1)");
  // Convert emphasis
  content = content.replace(/<strong[^>]*>([\s\S]*?)<\/strong>/gi, "**$1**");
  content = content.replace(/<b[^>]*>([\s\S]*?)<\/b>/gi, "**$1**");
  content = content.replace(/<em[^>]*>([\s\S]*?)<\/em>/gi, "*$1*");
  content = content.replace(/<i[^>]*>([\s\S]*?)<\/i>/gi, "*$1*");
  // Convert code: inline <code> first, then <pre> to fenced blocks
  content = content.replace(/<code[^>]*>([\s\S]*?)<\/code>/gi, "`$1`");
  content = content.replace(
    /<pre[^>]*>([\s\S]*?)<\/pre>/gi,
    "\n```\n$1\n```\n",
  );
  // Convert lists (flattens nesting; ordered lists also become "-" bullets)
  content = content.replace(/<li[^>]*>([\s\S]*?)<\/li>/gi, "- $1\n");
  content = content.replace(/<\/?[ou]l[^>]*>/gi, "\n");
  // Convert paragraphs and line breaks
  content = content.replace(/<p[^>]*>([\s\S]*?)<\/p>/gi, "\n$1\n");
  content = content.replace(/<br\s*\/?>/gi, "\n");
  content = content.replace(/<hr\s*\/?>/gi, "\n---\n");
  // Convert blockquotes: prefix every inner line with "> "
  content = content.replace(
    /<blockquote[^>]*>([\s\S]*?)<\/blockquote>/gi,
    (_, text) => {
      return text
        .split("\n")
        .map((line: string) => `> ${line}`)
        .join("\n");
    },
  );
  // Remove remaining HTML tags (catch-all — must run after all conversions)
  content = content.replace(/<[^>]+>/g, "");
  // Decode HTML entities (last, so decoded "<"/">" are not seen as tags)
  content = decodeHtmlEntities(content);
  // Clean up whitespace: collapse blank-line runs and horizontal whitespace
  content = content.replace(/\n{3,}/g, "\n\n");
  content = content.replace(/[ \t]+/g, " ");
  content = content.trim();
  return content;
};
/**
 * Pretty-print a JSON string inside a fenced markdown code block.
 * Input that fails to parse as JSON is returned unchanged.
 */
const formatJson = (json: string): string => {
  let parsed: unknown;
  try {
    parsed = JSON.parse(json);
  } catch {
    return json;
  }
  return ["```json", JSON.stringify(parsed, null, 2), "```"].join("\n");
};
/**
 * Route raw response text to the right formatter based on its content type:
 * JSON is pretty-printed, (X)HTML is converted to markdown, and anything else
 * (plain text, markdown, ...) passes through untouched.
 */
const processContent = (content: string, contentType: string): string => {
  const normalized = contentType.toLowerCase();
  if (normalized.includes("json")) {
    return formatJson(content);
  }
  return normalized.includes("html") || normalized.includes("xhtml")
    ? htmlToMarkdown(content)
    : content;
};
/**
 * Truncate oversized content to at most `maxLength` characters, appending a
 * note about how much was kept. Prefers cutting on a line boundary when one
 * falls within the last 20% of the truncation window; otherwise cuts at the
 * hard limit. Content at or under the limit is returned unchanged.
 */
const truncateContent = (content: string, maxLength: number): string => {
  if (content.length <= maxLength) {
    return content;
  }
  const head = content.slice(0, maxLength);
  const newlineAt = head.lastIndexOf("\n");
  // Cut on the newline only if it lands in the final 20% of the window.
  const cut = newlineAt > maxLength * 0.8 ? newlineAt : maxLength;
  const kb = Math.round(maxLength / 1000);
  return `${head.slice(0, cut)}\n\n... (content truncated, showing first ${kb}KB)`;
};
/**
 * Execute a web fetch: validate the URL, download with a timeout, and convert
 * the response (JSON → fenced block, HTML → markdown, other → as-is).
 *
 * Fixes over the previous version:
 * - The timeout timer is now cleared in `finally`, so it no longer keeps
 *   running (and firing a stale abort) when the fetch throws.
 * - The listener bridging `ctx.abort.signal` to the local controller is
 *   removed afterwards, avoiding listener accumulation on a reused signal.
 * - A genuine timeout is now reported as TIMEOUT via an explicit `timedOut`
 *   flag (previously a timer-triggered abort fell through to the generic
 *   FETCH_ERROR branch, since only the caller's signal was inspected).
 *
 * Cross-host redirects are not followed into content: they are reported back
 * so the caller can decide whether to fetch the new host.
 */
export const executeWebFetch = async (
  args: WebFetchParams,
  ctx: ToolContext,
): Promise<ToolResult> => {
  const { url, timeout = WEB_FETCH_DEFAULTS.TIMEOUT_MS } = args;
  if (!url || url.trim().length === 0) {
    return createErrorResult(WEB_FETCH_MESSAGES.URL_REQUIRED);
  }
  const parsedUrl = validateUrl(url);
  if (!parsedUrl) {
    return createErrorResult(WEB_FETCH_MESSAGES.INVALID_URL(url));
  }
  ctx.onMetadata?.({
    title: WEB_FETCH_TITLES.FETCHING(parsedUrl.hostname),
    status: "running",
  });
  // Abort on either the per-request timeout or the caller's abort signal.
  const controller = new AbortController();
  let timedOut = false;
  const timeoutId = setTimeout(() => {
    timedOut = true;
    controller.abort();
  }, timeout);
  const onAbort = () => controller.abort();
  ctx.abort.signal.addEventListener("abort", onAbort, { once: true });
  try {
    const response = await fetch(parsedUrl.toString(), {
      headers: {
        "User-Agent": WEB_FETCH_DEFAULTS.USER_AGENT,
        Accept:
          "text/html,application/xhtml+xml,application/xml;q=0.9,application/json,text/plain;q=0.8",
        "Accept-Language": "en-US,en;q=0.9",
      },
      signal: controller.signal,
      redirect: "follow",
    });
    if (!response.ok) {
      return createErrorResult(
        WEB_FETCH_MESSAGES.FETCH_ERROR(`HTTP ${response.status}`),
      );
    }
    // Check for redirect to a different host — report instead of returning
    // the redirected content.
    const finalUrl = new URL(response.url);
    if (finalUrl.host !== parsedUrl.host) {
      return {
        success: true,
        title: WEB_FETCH_TITLES.SUCCESS,
        output: WEB_FETCH_MESSAGES.REDIRECT_DETECTED(
          parsedUrl.host,
          finalUrl.host,
        ),
        metadata: {
          redirectUrl: response.url,
          originalUrl: url,
        },
      };
    }
    const contentType = response.headers.get("content-type") || "text/plain";
    let content = await response.text();
    // Cap oversized bodies before any processing.
    if (content.length > WEB_FETCH_DEFAULTS.MAX_CONTENT_LENGTH) {
      content = truncateContent(
        content,
        WEB_FETCH_DEFAULTS.MAX_CONTENT_LENGTH,
      );
    }
    // Process content based on its declared type.
    const processed = processContent(content, contentType);
    return createSuccessResult(response.url, processed, contentType);
  } catch (error) {
    if (timedOut || ctx.abort.signal.aborted) {
      return createErrorResult(WEB_FETCH_MESSAGES.TIMEOUT);
    }
    const message = error instanceof Error ? error.message : String(error);
    return createErrorResult(WEB_FETCH_MESSAGES.FETCH_ERROR(message));
  } finally {
    clearTimeout(timeoutId);
    ctx.abort.signal.removeEventListener("abort", onAbort);
  }
};
/** Tool registration: wires the web_fetch schema to its fetch/convert executor. */
export const webFetchTool: ToolDefinition<typeof webFetchParams> = {
  name: "web_fetch",
  description: WEB_FETCH_DESCRIPTION,
  parameters: webFetchParams,
  execute: executeWebFetch,
};

View File

@@ -0,0 +1,8 @@
/**
* WebFetch Tool
*
* Fetch and convert web content to markdown
*/
export { webFetchTool, executeWebFetch } from "@tools/web-fetch/execute";
export { webFetchParams, type WebFetchParams } from "@tools/web-fetch/params";

View File

@@ -0,0 +1,19 @@
/**
* WebFetch Tool Parameters
*/
import { z } from "zod";
// Zod schema for the web_fetch tool's arguments.
export const webFetchParams = z.object({
  url: z.string().describe("The URL to fetch content from"),
  // NOTE(review): `prompt` is accepted here but executeWebFetch only reads
  // `url` and `timeout` — confirm whether extraction-by-prompt is planned.
  prompt: z
    .string()
    .optional()
    .describe("Optional prompt to extract specific information from the content"),
  timeout: z
    .number()
    .optional()
    .describe("Timeout in milliseconds (default: 30000)"),
});
// Inferred TypeScript type for validated web_fetch arguments.
export type WebFetchParams = z.infer<typeof webFetchParams>;

View File

@@ -1,7 +1,7 @@
/**
* Web Search Tool Execution
*
* Uses DuckDuckGo HTML search (no API key required)
* Uses Bing RSS search (no API key required, no captcha)
*/
import {
@@ -55,69 +55,6 @@ const createSuccessResult = (
};
};
/**
* Parse DuckDuckGo HTML search results
*/
const parseSearchResults = (html: string, maxResults: number): SearchResult[] => {
const results: SearchResult[] = [];
// DuckDuckGo lite HTML structure parsing
// Look for result links and snippets
const resultPattern =
/<a[^>]+class="result-link"[^>]*href="([^"]+)"[^>]*>([^<]+)<\/a>[\s\S]*?<td[^>]*class="result-snippet"[^>]*>([^<]+)/gi;
// Alternative pattern for standard DuckDuckGo HTML
const altPattern =
/<a[^>]+rel="nofollow"[^>]*href="([^"]+)"[^>]*>([^<]+)<\/a>[\s\S]*?<span[^>]*>([^<]{20,})/gi;
// Try result-link pattern first
let match: RegExpExecArray | null;
while ((match = resultPattern.exec(html)) !== null && results.length < maxResults) {
const [, url, title, snippet] = match;
if (url && title && !url.includes("duckduckgo.com")) {
results.push({
title: decodeHtmlEntities(title.trim()),
url: decodeUrl(url),
snippet: decodeHtmlEntities(snippet.trim()),
});
}
}
// If no results, try alternative pattern
if (results.length === 0) {
while ((match = altPattern.exec(html)) !== null && results.length < maxResults) {
const [, url, title, snippet] = match;
if (url && title && !url.includes("duckduckgo.com")) {
results.push({
title: decodeHtmlEntities(title.trim()),
url: decodeUrl(url),
snippet: decodeHtmlEntities(snippet.trim()),
});
}
}
}
// Fallback: extract any external links with reasonable text
if (results.length === 0) {
const linkPattern = /<a[^>]+href="(https?:\/\/(?!duckduckgo)[^"]+)"[^>]*>([^<]{10,100})<\/a>/gi;
const seenUrls = new Set<string>();
while ((match = linkPattern.exec(html)) !== null && results.length < maxResults) {
const [, url, title] = match;
if (!seenUrls.has(url) && !url.includes("duckduckgo")) {
seenUrls.add(url);
results.push({
title: decodeHtmlEntities(title.trim()),
url: decodeUrl(url),
snippet: "",
});
}
}
}
return results;
};
/**
* Decode HTML entities
*/
@@ -147,21 +84,36 @@ const decodeHtmlEntities = (text: string): string => {
};
/**
* Decode DuckDuckGo redirect URLs
* Parse Bing RSS search results
*/
const decodeUrl = (url: string): string => {
// DuckDuckGo often wraps URLs in redirects
if (url.includes("uddg=")) {
const match = url.match(/uddg=([^&]+)/);
if (match) {
return decodeURIComponent(match[1]);
const parseRssResults = (rss: string, maxResults: number): SearchResult[] => {
const results: SearchResult[] = [];
// Parse RSS items
const itemPattern = /<item>([\s\S]*?)<\/item>/gi;
let match: RegExpExecArray | null;
while ((match = itemPattern.exec(rss)) !== null && results.length < maxResults) {
const itemContent = match[1];
const titleMatch = itemContent.match(/<title>([^<]+)<\/title>/);
const linkMatch = itemContent.match(/<link>([^<]+)<\/link>/);
const descMatch = itemContent.match(/<description>([^<]*)<\/description>/);
if (titleMatch && linkMatch) {
results.push({
title: decodeHtmlEntities(titleMatch[1].trim()),
url: linkMatch[1].trim(),
snippet: descMatch ? decodeHtmlEntities(descMatch[1].trim()) : "",
});
}
}
return url;
return results;
};
/**
* Perform web search using DuckDuckGo
* Perform web search using Bing RSS
*/
const performSearch = async (
query: string,
@@ -170,13 +122,13 @@ const performSearch = async (
): Promise<SearchResult[]> => {
const encodedQuery = encodeURIComponent(query);
// Use DuckDuckGo HTML search (lite version for easier parsing)
const searchUrl = `https://lite.duckduckgo.com/lite/?q=${encodedQuery}`;
// Use Bing RSS search (no captcha, no API key required)
const searchUrl = `https://www.bing.com/search?q=${encodedQuery}&format=rss`;
const response = await fetch(searchUrl, {
headers: {
"User-Agent": WEB_SEARCH_DEFAULTS.USER_AGENT,
Accept: "text/html",
Accept: "application/rss+xml, text/xml",
"Accept-Language": "en-US,en;q=0.9",
},
signal,
@@ -186,8 +138,8 @@ const performSearch = async (
throw new Error(`Search request failed: ${response.status}`);
}
const html = await response.text();
return parseSearchResults(html, maxResults);
const rss = await response.text();
return parseRssResults(rss, maxResults);
};
/**