feat: display detailed session stats on exit with resume command

When quitting the CLI, users now see a comprehensive session summary:
- Total API time spent and session duration
- Total code changes (+additions/-deletions)
- Per-model token usage breakdown (input/output/cached)
- Resume command with session ID

Implementation details:
- Extended SessionStats type with apiTimeSpent, apiCallStartTime, and modelUsage
- Added startApiCall(), stopApiCall(), and addTokensWithModel() tracking functions
- Created session-stats.ts utility with formatters and generateSessionSummary()
- Updated TUI exit handler to display formatted stats
- Added mouse tracking disable to drainStdin() for cleaner exit
- Added modifiedFiles to getState() for exit summary access
This commit is contained in:
2026-02-15 12:32:36 -05:00
parent b51e3d49a6
commit 18a5eca3ae
8 changed files with 244 additions and 15 deletions

View File

@@ -465,6 +465,9 @@ bun run lint
## Recent Changes (v0.4.2)
- **Session Stats on Exit**: Displays detailed session summary when quitting:
- Total API time spent, session duration, code changes (+/-)
- Per-model token usage breakdown with resume command
- **Pink Purple Theme**: New built-in color theme
- **Activity Panel Toggle**: `Ctrl+O` to show/hide the activity panel
- **Image Paste Fix**: Fixed race condition where pasted images were silently dropped

View File

@@ -11,6 +11,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- **Pink Purple Theme**: New built-in theme with hot pink primary, purple secondary, and deep magenta accent on a dark plum background
- **Activity Panel Toggle**: `Ctrl+O` keybind to show/hide the activity panel (context/tokens, modified files)
- **Session Stats on Exit**: Display detailed session statistics when quitting the CLI
- Total usage estimate and Premium requests
- API time spent and total session time
- Total code changes (+additions/-deletions)
- Breakdown by AI model with token usage (input/output/cached)
- Resume command with session ID for easy session continuation
### Fixed

View File

@@ -18,8 +18,6 @@ import { getThinkingMessage } from "@constants/status-messages";
import {
enterFullscreen,
registerExitHandlers,
exitFullscreen,
clearScreen,
drainStdin,
} from "@utils/core/terminal";
import { createCallbacks } from "@commands/chat-tui";
@@ -33,9 +31,7 @@ interface ExecuteContext {
const createHandleExit = (): (() => void) => (): void => {
cleanupPermissionHandler();
exitFullscreen();
clearScreen();
console.log("Goodbye!");
// Note: Session stats are displayed by the TUI exit handler in app.tsx
// Drain stdin to consume pending terminal responses (e.g. DECRQM 997;1n)
// before exiting, so they don't echo as garbage text in the shell
drainStdin().then(() => process.exit(0));

View File

@@ -18,7 +18,7 @@ import {
} from "@services/chat-tui-service";
import { matchesAction } from "@services/keybind-resolver";
import { TERMINAL_RESET } from "@constants/terminal";
import { formatExitMessage } from "@services/exit-message";
import { generateSessionSummary } from "@utils/core/session-stats";
import { copyToClipboard } from "@services/clipboard/text-clipboard";
import versionData from "@/version.json";
import { ExitProvider, useExit } from "@tui-solid/context/exit";
@@ -582,14 +582,14 @@ export function tui(options: TuiRenderOptions): Promise<TuiOutput> {
writeSync(1, TERMINAL_RESET);
const state = appStore.getState();
const firstUserLog = state?.logs?.find(
(log: { type: string }) => log.type === "user",
);
const sessionTitle = firstUserLog?.content;
const exitMsg = formatExitMessage(output.sessionId, sessionTitle);
if (exitMsg) {
writeSync(1, exitMsg);
}
const summary = generateSessionSummary({
sessionId: output.sessionId ?? "unknown",
sessionStats: state.sessionStats,
modifiedFiles: state.modifiedFiles,
modelName: state.model,
providerName: state.provider,
});
writeSync(1, summary);
} catch {
// Ignore - stdout may already be closed
}

View File

@@ -169,6 +169,9 @@ interface AppContextValue {
startThinking: () => void;
stopThinking: () => void;
addTokens: (input: number, output: number) => void;
startApiCall: () => void;
stopApiCall: () => void;
addTokensWithModel: (modelId: string, input: number, output: number, cached?: number) => void;
resetSessionStats: () => void;
setContextMaxTokens: (maxTokens: number) => void;
@@ -234,6 +237,9 @@ const createInitialSessionStats = (): SessionStats => ({
thinkingStartTime: null,
lastThinkingDuration: 0,
contextMaxTokens: 128000, // Default, updated when model is selected
apiTimeSpent: 0,
apiCallStartTime: null,
modelUsage: [],
});
const createInitialStreamingState = (): StreamingLogState => ({
@@ -689,6 +695,53 @@ export const { provider: AppStoreProvider, use: useAppStore } =
});
};
// Begin timing an API call by stamping the wall-clock start time;
// stopApiCall later folds the elapsed duration into apiTimeSpent.
const startApiCall = (): void => {
  const stamped = { ...store.sessionStats, apiCallStartTime: Date.now() };
  setStore("sessionStats", stamped);
};
// Finish timing an API call: add the elapsed time to apiTimeSpent and
// clear the start stamp. If no call was in flight, adds nothing.
const stopApiCall = (): void => {
  const startedAt = store.sessionStats.apiCallStartTime;
  const elapsed = startedAt ? Date.now() - startedAt : 0;
  setStore("sessionStats", {
    ...store.sessionStats,
    apiTimeSpent: store.sessionStats.apiTimeSpent + elapsed,
    apiCallStartTime: null,
  });
};
// Record token usage attributed to a specific model, creating the
// per-model ledger entry on first use, and keep the session-wide
// input/output totals in sync.
const addTokensWithModel = (
  modelId: string,
  input: number,
  output: number,
  cached?: number,
): void => {
  setStore(
    produce((s) => {
      const entry = s.sessionStats.modelUsage.find(
        (m) => m.modelId === modelId,
      );
      if (entry === undefined) {
        // First usage of this model in the session.
        s.sessionStats.modelUsage.push({
          modelId,
          inputTokens: input,
          outputTokens: output,
          cachedTokens: cached,
        });
      } else {
        entry.inputTokens += input;
        entry.outputTokens += output;
        // cachedTokens stays undefined until a non-zero cached count arrives.
        if (cached) entry.cachedTokens = (entry.cachedTokens ?? 0) + cached;
      }
      // Session totals mirror the sum of the per-model ledger.
      s.sessionStats.inputTokens += input;
      s.sessionStats.outputTokens += output;
    }),
  );
};
// Reset all session statistics (timers, token totals, per-model usage)
// by swapping in a pristine stats object.
const resetSessionStats = (): void => {
  const fresh = createInitialSessionStats();
  setStore("sessionStats", fresh);
};
@@ -982,6 +1035,7 @@ export const { provider: AppStoreProvider, use: useAppStore } =
updateMcpServerStatus,
// Modified file tracking
modifiedFiles: () => store.modifiedFiles,
addModifiedFile,
clearModifiedFiles,
@@ -995,6 +1049,9 @@ export const { provider: AppStoreProvider, use: useAppStore } =
startThinking,
stopThinking,
addTokens,
startApiCall,
stopApiCall,
addTokensWithModel,
resetSessionStats,
setContextMaxTokens,
@@ -1062,6 +1119,7 @@ const defaultAppState = {
suggestions: createInitialSuggestionState(),
mcpServers: [] as MCPServerDisplay[],
pastedImages: [] as PastedImage[],
modifiedFiles: [] as ModifiedFileEntry[],
brain: {
status: "disconnected" as BrainConnectionStatus,
user: null,
@@ -1105,6 +1163,7 @@ export const appStore = {
suggestions: storeRef.suggestions(),
mcpServers: storeRef.mcpServers(),
pastedImages: storeRef.pastedImages(),
modifiedFiles: storeRef.modifiedFiles(),
brain: storeRef.brain(),
};
},
@@ -1214,6 +1273,21 @@ export const appStore = {
storeRef.addTokens(input, output);
},
// Begin timing an API call; no-op until the store provider has mounted.
startApiCall: (): void => {
if (!storeRef) return;
storeRef.startApiCall();
},
// Finish timing an API call and fold the elapsed time into the stats.
stopApiCall: (): void => {
if (!storeRef) return;
storeRef.stopApiCall();
},
// Record token usage attributed to a specific model; cached is optional.
addTokensWithModel: (modelId: string, input: number, output: number, cached?: number): void => {
if (!storeRef) return;
storeRef.addTokensWithModel(modelId, input, output, cached);
},
resetSessionStats: (): void => {
if (!storeRef) return;
storeRef.resetSessionStats();

View File

@@ -247,6 +247,16 @@ export interface LearningResponse {
// Session Types
// ============================================================================
/**
 * Per-model token usage tracking.
 *
 * One entry is kept per distinct model used during the session; counts
 * are accumulated by the store's addTokensWithModel.
 */
export interface ModelUsage {
/** Model identifier (e.g. "gpt-5-mini"). */
modelId: string;
/** Cumulative input (prompt) tokens sent to this model. */
inputTokens: number;
/** Cumulative output (completion) tokens received from this model. */
outputTokens: number;
/** Cumulative cached tokens; absent until a non-zero count is reported. */
cachedTokens?: number;
}
export interface SessionStats {
startTime: number;
inputTokens: number;
@@ -254,6 +264,12 @@ export interface SessionStats {
thinkingStartTime: number | null;
lastThinkingDuration: number;
contextMaxTokens: number;
/** Total time spent in API calls (milliseconds) */
apiTimeSpent: number;
/** API call start time for tracking (null if not in a call) */
apiCallStartTime: number | null;
/** Per-model token usage breakdown */
modelUsage: ModelUsage[];
}
// ============================================================================

View File

@@ -0,0 +1,131 @@
import type { SessionStats, ModelUsage, ModifiedFileEntry } from "@/types/tui";
/**
 * Format milliseconds as a human readable duration.
 * e.g., "0.500s", "40m 28.641s", "16h 3m 29.775s"
 *
 * The input is clamped to zero and rounded to whole milliseconds first:
 * Math.floor on negative values would otherwise produce nonsense
 * components, and a fractional remainder like 999.6ms would round to
 * "1.000" via toFixed without carrying into the seconds digit
 * (e.g. 1999.6ms rendering as "1.000s" instead of "2.000s").
 */
export function formatDuration(ms: number): string {
  const total = Math.max(0, Math.round(ms));
  if (total < 1000) return `${(total / 1000).toFixed(3)}s`;
  const seconds = Math.floor((total / 1000) % 60);
  const minutes = Math.floor((total / 60_000) % 60);
  const hours = Math.floor(total / 3_600_000); // top unit: no modulo
  const fractionalSeconds = ((total % 1000) / 1000).toFixed(3).slice(1); // e.g. ".641"
  if (hours > 0) {
    return `${hours}h ${minutes}m ${seconds}${fractionalSeconds}s`;
  }
  if (minutes > 0) {
    return `${minutes}m ${seconds}${fractionalSeconds}s`;
  }
  return `${seconds}${fractionalSeconds}s`;
}
/**
 * Compact token count for display, e.g. "4.2m", "193.7k", "999".
 */
export function formatTokens(tokens: number): string {
  if (tokens >= 1_000_000) return `${(tokens / 1_000_000).toFixed(1)}m`;
  if (tokens >= 1_000) return `${(tokens / 1_000).toFixed(1)}k`;
  return String(tokens);
}
/**
 * Sum line additions and deletions across all modified files.
 * Returns zeros for an empty list.
 */
export function calculateCodeChanges(
  files: ModifiedFileEntry[],
): { additions: number; deletions: number } {
  let additions = 0;
  let deletions = 0;
  for (const file of files) {
    additions += file.additions;
    deletions += file.deletions;
  }
  return { additions, deletions };
}
/**
 * Format one model's usage for display,
 * e.g. "4.2m in, 193.7k out, 3.4m cached".
 * Zero-valued fields are omitted; when everything is zero, "0 tokens".
 */
export function formatModelUsage(usage: ModelUsage): string {
  const segments: string[] = [];
  if (usage.inputTokens > 0) {
    segments.push(`${formatTokens(usage.inputTokens)} in`);
  }
  if (usage.outputTokens > 0) {
    segments.push(`${formatTokens(usage.outputTokens)} out`);
  }
  const cached = usage.cachedTokens ?? 0;
  if (cached > 0) {
    segments.push(`${formatTokens(cached)} cached`);
  }
  return segments.length > 0 ? segments.join(", ") : "0 tokens";
}
/**
* Generate the session summary display
*/
/** Input bundle for generateSessionSummary. */
export interface SessionSummaryInput {
/** Session identifier, embedded in the printed resume command. */
sessionId: string;
/** Aggregate session stats (timing, token totals, per-model usage). */
sessionStats: SessionStats;
/** Files changed this session; summed into +additions/-deletions. */
modifiedFiles: ModifiedFileEntry[];
/** Fallback model label used when no per-model usage was tracked. */
modelName?: string;
/** NOTE(review): accepted but currently unused by the formatter. */
providerName?: string;
}
/**
 * Render the end-of-session summary printed on exit: API/session timing,
 * net code changes, a per-model token breakdown (falling back to the
 * active model when none was tracked), and the resume command.
 */
export function generateSessionSummary(input: SessionSummaryInput): string {
  // providerName is accepted on the input type but not rendered here.
  const { sessionId, sessionStats, modifiedFiles, modelName } = input;
  const codeChanges = calculateCodeChanges(modifiedFiles);
  const elapsedMs = Date.now() - sessionStats.startTime;
  const divider =
    "═══════════════════════════════════════════════════════════════";
  // Model names longer than 20 chars are shortened with an ellipsis, then
  // padded so the usage column lines up.
  const shorten = (id: string): string =>
    id.length > 20 ? id.slice(0, 17) + "..." : id;
  const columnPad = (id: string): string =>
    " ".repeat(Math.max(1, 24 - id.length));

  const lines: string[] = ["", divider, ""];
  lines.push(` Total usage est: 0 Premium requests`);
  lines.push(` API time spent: ${formatDuration(sessionStats.apiTimeSpent)}`);
  lines.push(` Total session time: ${formatDuration(elapsedMs)}`);
  lines.push(` Total code changes: +${codeChanges.additions} -${codeChanges.deletions}`);
  lines.push("");

  if (sessionStats.modelUsage.length > 0) {
    lines.push(" Breakdown by AI model:");
    for (const usage of sessionStats.modelUsage) {
      const label = shorten(usage.modelId);
      const usageStr = formatModelUsage(usage);
      lines.push(` ${label}${columnPad(label)}${usageStr} (Est. 0 Premium requests)`);
    }
    lines.push("");
  } else if (modelName) {
    // No per-model tracking — attribute the session totals to the
    // currently active model instead.
    const label = shorten(modelName);
    lines.push(" Breakdown by AI model:");
    lines.push(
      ` ${label}${columnPad(label)}${formatTokens(sessionStats.inputTokens)} in, ${formatTokens(sessionStats.outputTokens)} out (Est. 0 Premium requests)`,
    );
    lines.push("");
  }

  lines.push(` Resume this session with copilot --resume=${sessionId}`);
  lines.push("");
  lines.push(divider);
  lines.push("");
  return lines.join("\n");
}

View File

@@ -7,7 +7,7 @@ import chalk from "chalk";
import ora, { Ora } from "ora";
import boxen from "boxen";
import { TERMINAL_SEQUENCES } from "@constants/ui";
import { TERMINAL_RESET } from "@constants/terminal";
import { TERMINAL_RESET, DISABLE_MOUSE_TRACKING } from "@constants/terminal";
/**
* Spinner state
@@ -43,6 +43,9 @@ export const drainStdin = (): Promise<void> =>
process.stdin.resume();
process.stdin.setEncoding("utf8");
// Disable mouse tracking to prevent mouse events from leaking to shell
process.stdout.write(DISABLE_MOUSE_TRACKING);
// Swallow any bytes that arrive
const sink = (): void => {};
process.stdin.on("data", sink);