Merge remote-tracking branch 'origin/main' into ani-patches
This commit is contained in:
8
.husky/install.mjs
Normal file
8
.husky/install.mjs
Normal file
@@ -0,0 +1,8 @@
|
||||
if (process.env.NODE_ENV === "production" || process.env.CI === "true") {
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
try {
|
||||
const husky = (await import("husky")).default;
|
||||
husky();
|
||||
} catch {}
|
||||
@@ -1,3 +1,7 @@
|
||||
#!/usr/bin/env sh
|
||||
# Run the same checks as CI to ensure parity
|
||||
bun run check
|
||||
|
||||
# Lint staged files only (fast)
|
||||
bunx lint-staged
|
||||
|
||||
# Typecheck full project (catches type errors in unchanged files)
|
||||
bun run typecheck
|
||||
|
||||
4
bun.lock
4
bun.lock
@@ -5,7 +5,7 @@
|
||||
"": {
|
||||
"name": "@letta-ai/letta-code",
|
||||
"dependencies": {
|
||||
"@letta-ai/letta-client": "^1.7.12",
|
||||
"@letta-ai/letta-client": "1.8.0",
|
||||
"glob": "^13.0.0",
|
||||
"highlight.js": "^11.11.1",
|
||||
"ink-link": "^5.0.0",
|
||||
@@ -96,7 +96,7 @@
|
||||
|
||||
"@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.0", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA=="],
|
||||
|
||||
"@letta-ai/letta-client": ["@letta-ai/letta-client@1.7.12", "", {}, "sha512-OV3YuT+f8iVfu56JugDEz/29tBm3SZXMgdNAG016Lu1zneDSSt+7tFpI6eetrKRhJ8RWLJzCwnrkmXNnph1Wuw=="],
|
||||
"@letta-ai/letta-client": ["@letta-ai/letta-client@1.8.0", "", {}, "sha512-OdeMH0vfwFqMNuNyOypJ2wgWI9m6xFDhcODQVJk1geAoCJ3F1BkmEWz8+lWM+NyNkk/4JjcoqUTimIzVH+u0JQ=="],
|
||||
|
||||
"@types/bun": ["@types/bun@1.3.7", "", { "dependencies": { "bun-types": "1.3.7" } }, "sha512-lmNuMda+Z9b7tmhA0tohwy8ZWFSnmQm1UDWXtH5r9F7wZCfkeO3Jx7wKQ1EOiKq43yHts7ky6r8SDJQWRNupkA=="],
|
||||
|
||||
|
||||
4
package-lock.json
generated
4
package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@letta-ai/letta-code",
|
||||
"version": "0.19.5",
|
||||
"version": "0.19.7",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@letta-ai/letta-code",
|
||||
"version": "0.19.5",
|
||||
"version": "0.19.7",
|
||||
"hasInstallScript": true,
|
||||
"license": "Apache-2.0",
|
||||
"dependencies": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@letta-ai/letta-code",
|
||||
"version": "0.19.5",
|
||||
"version": "0.19.7",
|
||||
"description": "Letta Code is a CLI tool for interacting with stateful Letta agents from the terminal.",
|
||||
"type": "module",
|
||||
"bin": {
|
||||
@@ -33,7 +33,7 @@
|
||||
"access": "public"
|
||||
},
|
||||
"dependencies": {
|
||||
"@letta-ai/letta-client": "^1.7.12",
|
||||
"@letta-ai/letta-client": "1.8.0",
|
||||
"glob": "^13.0.0",
|
||||
"highlight.js": "^11.11.1",
|
||||
"ink-link": "^5.0.0",
|
||||
@@ -64,6 +64,7 @@
|
||||
"typescript": "^5.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"prepare": "node .husky/install.mjs",
|
||||
"lint": "bunx --bun @biomejs/biome@2.2.5 check src",
|
||||
"fix": "bunx --bun @biomejs/biome@2.2.5 check --write src",
|
||||
"typecheck": "tsc --noEmit",
|
||||
|
||||
@@ -189,6 +189,7 @@ export async function getClient() {
|
||||
apiKey,
|
||||
baseURL,
|
||||
logger: sdkLogger,
|
||||
timeout: 10 * 60 * 1000, // 10 min — letta-code manages cancellation via AbortController; SDK default (60s) is too short
|
||||
defaultHeaders: {
|
||||
"X-Letta-Source": "letta-code",
|
||||
"User-Agent": `letta-code/${packageJson.version}`,
|
||||
|
||||
@@ -375,8 +375,11 @@ export async function applyMemfsFlags(
|
||||
export async function isLettaCloud(): Promise<boolean> {
|
||||
const { getServerUrl } = await import("./client");
|
||||
const serverUrl = getServerUrl();
|
||||
|
||||
return (
|
||||
serverUrl.includes("api.letta.com") || process.env.LETTA_MEMFS_LOCAL === "1"
|
||||
serverUrl.includes("api.letta.com") ||
|
||||
process.env.LETTA_MEMFS_LOCAL === "1" ||
|
||||
process.env.LETTA_API_KEY === "local-desktop"
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -150,21 +150,28 @@ async function configureLocalCredentialHelper(
|
||||
* Rules:
|
||||
* - Frontmatter is REQUIRED (must start with ---)
|
||||
* - Must be properly closed with ---
|
||||
* - Required fields: description (non-empty string), limit (positive integer)
|
||||
* - Required fields: description (non-empty string)
|
||||
* - read_only is a PROTECTED field: agent cannot add, remove, or change it.
|
||||
* Files where HEAD has read_only: true cannot be modified at all.
|
||||
* - Only allowed agent-editable keys: description, limit
|
||||
* - Only allowed agent-editable key: description
|
||||
* - Legacy key 'limit' is tolerated for backward compatibility
|
||||
* - read_only may exist (from server) but agent must not change it
|
||||
*/
|
||||
export const PRE_COMMIT_HOOK_SCRIPT = `#!/usr/bin/env bash
|
||||
# Validate frontmatter in staged memory .md files
|
||||
# Installed by Letta Code CLI
|
||||
|
||||
AGENT_EDITABLE_KEYS="description limit"
|
||||
AGENT_EDITABLE_KEYS="description"
|
||||
PROTECTED_KEYS="read_only"
|
||||
ALL_KNOWN_KEYS="description limit read_only"
|
||||
ALL_KNOWN_KEYS="description read_only limit"
|
||||
errors=""
|
||||
|
||||
# Skills must always be directories: skills/<name>/SKILL.md
|
||||
# Reject legacy flat skill files (both current and legacy repo layouts).
|
||||
for file in $(git diff --cached --name-only --diff-filter=ACMR | grep -E '^(memory/)?skills/[^/]+\\.md$' || true); do
|
||||
errors="$errors\\n $file: invalid skill path (skills must be folders). Use skills/<name>/SKILL.md"
|
||||
done
|
||||
|
||||
# Helper: extract a frontmatter value from content
|
||||
get_fm_value() {
|
||||
local content="$1" key="$2"
|
||||
@@ -174,7 +181,9 @@ get_fm_value() {
|
||||
echo "$content" | tail -n +2 | head -n $((closing_line - 1)) | grep "^$key:" | cut -d: -f2- | sed 's/^ *//;s/ *$//'
|
||||
}
|
||||
|
||||
for file in $(git diff --cached --name-only --diff-filter=ACM | grep '^memory/.*\\.md$'); do
|
||||
# Match .md files under system/ or reference/ (with optional memory/ prefix).
|
||||
# Skip skill SKILL.md files — they use a different frontmatter format.
|
||||
for file in $(git diff --cached --name-only --diff-filter=ACM | grep -E '^(memory/)?(system|reference)/.*\\.md$'); do
|
||||
staged=$(git show ":$file")
|
||||
|
||||
# Frontmatter is required
|
||||
@@ -206,7 +215,6 @@ for file in $(git diff --cached --name-only --diff-filter=ACM | grep '^memory/.*
|
||||
|
||||
# Track required fields
|
||||
has_description=false
|
||||
has_limit=false
|
||||
|
||||
# Validate each line
|
||||
while IFS= read -r line; do
|
||||
@@ -247,10 +255,7 @@ for file in $(git diff --cached --name-only --diff-filter=ACM | grep '^memory/.*
|
||||
# Validate value types
|
||||
case "$key" in
|
||||
limit)
|
||||
has_limit=true
|
||||
if ! echo "$value" | grep -qE '^[0-9]+$' || [ "$value" = "0" ]; then
|
||||
errors="$errors\\n $file: 'limit' must be a positive integer, got '$value'"
|
||||
fi
|
||||
# Legacy field accepted for backward compatibility.
|
||||
;;
|
||||
description)
|
||||
has_description=true
|
||||
@@ -265,9 +270,6 @@ for file in $(git diff --cached --name-only --diff-filter=ACM | grep '^memory/.*
|
||||
if [ "$has_description" = "false" ]; then
|
||||
errors="$errors\\n $file: missing required field 'description'"
|
||||
fi
|
||||
if [ "$has_limit" = "false" ]; then
|
||||
errors="$errors\\n $file: missing required field 'limit'"
|
||||
fi
|
||||
|
||||
# Check if protected keys were removed (existed in HEAD but not in staged)
|
||||
if [ -n "$head_content" ]; then
|
||||
|
||||
@@ -23,6 +23,7 @@ import {
|
||||
} from "./approval-result-normalization";
|
||||
import { getClient } from "./client";
|
||||
import { buildClientSkillsPayload } from "./clientSkills";
|
||||
import { ALL_SKILL_SOURCES } from "./skillSources";
|
||||
|
||||
const streamRequestStartTimes = new WeakMap<object, number>();
|
||||
const streamToolContextIds = new WeakMap<object, string>();
|
||||
@@ -132,6 +133,7 @@ export async function sendMessageStream(
|
||||
const { clientSkills, errors: clientSkillDiscoveryErrors } =
|
||||
await buildClientSkillsPayload({
|
||||
agentId: opts.agentId,
|
||||
skillSources: ALL_SKILL_SOURCES,
|
||||
});
|
||||
|
||||
const resolvedConversationId = conversationId;
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
* action. No network calls, no React, no stream-json output.
|
||||
*/
|
||||
|
||||
import { randomUUID } from "node:crypto";
|
||||
import type { MessageCreate } from "@letta-ai/letta-client/resources/agents/agents";
|
||||
import type { ApprovalCreate } from "@letta-ai/letta-client/resources/agents/messages";
|
||||
import { isCloudflareEdge52xHtmlError } from "../cli/helpers/errorFormatter";
|
||||
@@ -15,8 +16,7 @@ import { isZaiNonRetryableError } from "../cli/helpers/zaiErrors";
|
||||
|
||||
const INVALID_TOOL_CALL_IDS_FRAGMENT = "invalid tool call ids";
|
||||
const APPROVAL_PENDING_DETAIL_FRAGMENT = "waiting for approval";
|
||||
const CONVERSATION_BUSY_DETAIL_FRAGMENT =
|
||||
"another request is currently being processed";
|
||||
const CONVERSATION_BUSY_DETAIL_FRAGMENT = "is currently being processed";
|
||||
const EMPTY_RESPONSE_DETAIL_FRAGMENT = "empty content in";
|
||||
const RETRYABLE_PROVIDER_DETAIL_PATTERNS = [
|
||||
"Anthropic API error",
|
||||
@@ -369,7 +369,10 @@ export function rebuildInputWithFreshDenials(
|
||||
serverApprovals: PendingApprovalInfo[],
|
||||
denialReason: string,
|
||||
): Array<MessageCreate | ApprovalCreate> {
|
||||
const stripped = currentInput.filter((item) => item?.type !== "approval");
|
||||
// Refresh OTIDs on all stripped messages — this is a new request, not a retry
|
||||
const stripped = currentInput
|
||||
.filter((item) => item?.type !== "approval")
|
||||
.map((item) => ({ ...item, otid: randomUUID() }));
|
||||
|
||||
if (serverApprovals.length > 0) {
|
||||
const denials: ApprovalCreate = {
|
||||
@@ -380,6 +383,7 @@ export function rebuildInputWithFreshDenials(
|
||||
approve: false,
|
||||
reason: denialReason,
|
||||
})),
|
||||
otid: randomUUID(),
|
||||
};
|
||||
return [denials, ...stripped];
|
||||
}
|
||||
|
||||
360
src/cli/App.tsx
360
src/cli/App.tsx
@@ -1,5 +1,6 @@
|
||||
// src/cli/App.tsx
|
||||
|
||||
import { randomUUID } from "node:crypto";
|
||||
import { existsSync, readFileSync, renameSync, writeFileSync } from "node:fs";
|
||||
import { homedir, tmpdir } from "node:os";
|
||||
import { join, relative } from "node:path";
|
||||
@@ -235,6 +236,7 @@ import {
|
||||
import { formatCompact } from "./helpers/format";
|
||||
import { parsePatchOperations } from "./helpers/formatArgsDisplay";
|
||||
import {
|
||||
buildDoctorMessage,
|
||||
buildInitMessage,
|
||||
fireAutoInit,
|
||||
gatherInitGitContext,
|
||||
@@ -280,13 +282,7 @@ import {
|
||||
import { formatStatusLineHelp } from "./helpers/statusLineHelp";
|
||||
import { buildStatusLinePayload } from "./helpers/statusLinePayload";
|
||||
import { executeStatusLineCommand } from "./helpers/statusLineRuntime";
|
||||
import {
|
||||
type ApprovalRequest,
|
||||
type DrainResult,
|
||||
discoverFallbackRunIdWithTimeout,
|
||||
drainStream,
|
||||
drainStreamWithResume,
|
||||
} from "./helpers/stream";
|
||||
import { type ApprovalRequest, drainStreamWithResume } from "./helpers/stream";
|
||||
import {
|
||||
collectFinishedTaskToolCalls,
|
||||
createSubagentGroupItem,
|
||||
@@ -1903,6 +1899,8 @@ export default function App({
|
||||
// Epoch counter to force dequeue effect re-run when refs change but state doesn't
|
||||
// Incremented when userCancelledRef is reset while messages are queued
|
||||
const [dequeueEpoch, setDequeueEpoch] = useState(0);
|
||||
// Strict lock to ensure dequeue submit path is at-most-once while onSubmit is in flight.
|
||||
const dequeueInFlightRef = useRef(false);
|
||||
|
||||
// Track last dequeued message for restoration on error
|
||||
// If an error occurs after dequeue, we restore this to the input field (if input is empty)
|
||||
@@ -3158,9 +3156,15 @@ export default function App({
|
||||
|
||||
const fetchConfig = async () => {
|
||||
try {
|
||||
// Use pre-loaded agent state if available, otherwise fetch
|
||||
const { getClient } = await import("../agent/client");
|
||||
const client = await getClient();
|
||||
const agent = await client.agents.retrieve(agentId);
|
||||
let agent: AgentState;
|
||||
if (initialAgentState && initialAgentState.id === agentId) {
|
||||
agent = initialAgentState;
|
||||
} else {
|
||||
agent = await client.agents.retrieve(agentId);
|
||||
}
|
||||
|
||||
setAgentState(agent);
|
||||
setLlmConfig(agent.llm_config);
|
||||
@@ -3306,7 +3310,7 @@ export default function App({
|
||||
cancelled = true;
|
||||
};
|
||||
}
|
||||
}, [loadingState, agentId]);
|
||||
}, [loadingState, agentId, initialAgentState]);
|
||||
|
||||
// Keep effective model state in sync with the active conversation override.
|
||||
// biome-ignore lint/correctness/useExhaustiveDependencies: ref.current is intentionally read dynamically
|
||||
@@ -3859,6 +3863,7 @@ export default function App({
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: `${systemMsg}\n\n${newState.originalPrompt}`,
|
||||
otid: randomUUID(),
|
||||
},
|
||||
],
|
||||
{ allowReentry: true },
|
||||
@@ -3868,6 +3873,13 @@ export default function App({
|
||||
|
||||
// Copy so we can safely mutate for retry recovery flows
|
||||
let currentInput = [...initialInput];
|
||||
const refreshCurrentInputOtids = () => {
|
||||
// Terminal stop-reason retries are NEW requests and must not reuse OTIDs.
|
||||
currentInput = currentInput.map((item) => ({
|
||||
...item,
|
||||
otid: randomUUID(),
|
||||
}));
|
||||
};
|
||||
const allowReentry = options?.allowReentry ?? false;
|
||||
const hasApprovalInput = initialInput.some(
|
||||
(item) => item.type === "approval",
|
||||
@@ -3950,11 +3962,16 @@ export default function App({
|
||||
canInjectInterruptRecovery
|
||||
) {
|
||||
currentInput = [
|
||||
...lastSentInputRef.current,
|
||||
// Refresh OTIDs — this is a new request, not a retry of the interrupted one
|
||||
...lastSentInputRef.current.map((m) => ({
|
||||
...m,
|
||||
otid: randomUUID(),
|
||||
})),
|
||||
...currentInput.map((m) =>
|
||||
m.type === "message" && m.role === "user"
|
||||
? {
|
||||
...m,
|
||||
otid: randomUUID(),
|
||||
content: [
|
||||
{ type: "text" as const, text: INTERRUPT_RECOVERY_ALERT },
|
||||
...(typeof m.content === "string"
|
||||
@@ -3962,7 +3979,7 @@ export default function App({
|
||||
: m.content),
|
||||
],
|
||||
}
|
||||
: m,
|
||||
: { ...m, otid: randomUUID() },
|
||||
),
|
||||
];
|
||||
pendingInterruptRecoveryConversationIdRef.current = null;
|
||||
@@ -4006,6 +4023,7 @@ export default function App({
|
||||
// Capture once before the retry loop so the temporal filter in
|
||||
// discoverFallbackRunIdForResume covers runs created by any attempt.
|
||||
const requestStartedAtMs = Date.now();
|
||||
let highestSeqIdSeen: number | null = null;
|
||||
|
||||
while (true) {
|
||||
// Capture the signal BEFORE any async operations
|
||||
@@ -4027,23 +4045,22 @@ export default function App({
|
||||
// Inject queued skill content as user message parts (LET-7353)
|
||||
// This centralizes skill content injection so all approval-send paths
|
||||
// automatically get skill SKILL.md content alongside tool results.
|
||||
{
|
||||
const { consumeQueuedSkillContent } = await import(
|
||||
"../tools/impl/skillContentRegistry"
|
||||
);
|
||||
const skillContents = consumeQueuedSkillContent();
|
||||
if (skillContents.length > 0) {
|
||||
currentInput = [
|
||||
...currentInput,
|
||||
{
|
||||
role: "user",
|
||||
content: skillContents.map((sc) => ({
|
||||
type: "text" as const,
|
||||
text: sc.content,
|
||||
})),
|
||||
},
|
||||
];
|
||||
}
|
||||
const { consumeQueuedSkillContent } = await import(
|
||||
"../tools/impl/skillContentRegistry"
|
||||
);
|
||||
const skillContents = consumeQueuedSkillContent();
|
||||
if (skillContents.length > 0) {
|
||||
currentInput = [
|
||||
...currentInput,
|
||||
{
|
||||
role: "user",
|
||||
content: skillContents.map((sc) => ({
|
||||
type: "text" as const,
|
||||
text: sc.content,
|
||||
})),
|
||||
otid: randomUUID(),
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
// Stream one turn - use ref to always get the latest conversationId
|
||||
@@ -4053,7 +4070,6 @@ export default function App({
|
||||
let stream: Awaited<ReturnType<typeof sendMessageStream>> | null =
|
||||
null;
|
||||
let turnToolContextId: string | null = null;
|
||||
let preStreamResumeResult: DrainResult | null = null;
|
||||
try {
|
||||
const nextStream = await sendMessageStream(
|
||||
conversationIdRef.current,
|
||||
@@ -4150,134 +4166,42 @@ export default function App({
|
||||
},
|
||||
);
|
||||
|
||||
// Attempt to discover and resume the in-flight run before waiting
|
||||
try {
|
||||
const resumeCtx: StreamRequestContext = {
|
||||
conversationId: conversationIdRef.current,
|
||||
resolvedConversationId: conversationIdRef.current,
|
||||
agentId: agentIdRef.current,
|
||||
requestStartedAtMs,
|
||||
};
|
||||
debugLog(
|
||||
"stream",
|
||||
"Conversation busy: attempting run discovery for resume (conv=%s, agent=%s)",
|
||||
resumeCtx.conversationId,
|
||||
resumeCtx.agentId,
|
||||
);
|
||||
const client = await getClient();
|
||||
const discoveredRunId = await discoverFallbackRunIdWithTimeout(
|
||||
client,
|
||||
resumeCtx,
|
||||
);
|
||||
debugLog(
|
||||
"stream",
|
||||
"Run discovery result: %s",
|
||||
discoveredRunId ?? "none",
|
||||
);
|
||||
// Show status message
|
||||
const statusId = uid("status");
|
||||
buffersRef.current.byId.set(statusId, {
|
||||
kind: "status",
|
||||
id: statusId,
|
||||
lines: ["Conversation is busy, waiting and retrying…"],
|
||||
});
|
||||
buffersRef.current.order.push(statusId);
|
||||
refreshDerived();
|
||||
|
||||
if (discoveredRunId) {
|
||||
if (signal?.aborted || userCancelledRef.current) {
|
||||
const isStaleAtAbort =
|
||||
myGeneration !== conversationGenerationRef.current;
|
||||
if (!isStaleAtAbort) {
|
||||
setStreaming(false);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Found a running run — resume its stream
|
||||
buffersRef.current.interrupted = false;
|
||||
buffersRef.current.commitGeneration =
|
||||
(buffersRef.current.commitGeneration || 0) + 1;
|
||||
|
||||
const resumeStream = await client.runs.messages.stream(
|
||||
discoveredRunId,
|
||||
{
|
||||
starting_after: 0,
|
||||
batch_size: 1000,
|
||||
},
|
||||
);
|
||||
|
||||
preStreamResumeResult = await drainStream(
|
||||
resumeStream,
|
||||
buffersRef.current,
|
||||
refreshDerivedThrottled,
|
||||
signal,
|
||||
undefined, // no handleFirstMessage on resume
|
||||
undefined,
|
||||
contextTrackerRef.current,
|
||||
);
|
||||
// Attach the discovered run ID
|
||||
if (!preStreamResumeResult.lastRunId) {
|
||||
preStreamResumeResult.lastRunId = discoveredRunId;
|
||||
}
|
||||
debugLog(
|
||||
"stream",
|
||||
"Pre-stream resume succeeded (runId=%s, stopReason=%s)",
|
||||
discoveredRunId,
|
||||
preStreamResumeResult.stopReason,
|
||||
);
|
||||
// Fall through — preStreamResumeResult will short-circuit drainStreamWithResume
|
||||
// Wait with abort checking (same pattern as LLM API error retry)
|
||||
let cancelled = false;
|
||||
const startTime = Date.now();
|
||||
while (Date.now() - startTime < retryDelayMs) {
|
||||
if (
|
||||
abortControllerRef.current?.signal.aborted ||
|
||||
userCancelledRef.current
|
||||
) {
|
||||
cancelled = true;
|
||||
break;
|
||||
}
|
||||
} catch (resumeError) {
|
||||
if (signal?.aborted || userCancelledRef.current) {
|
||||
const isStaleAtAbort =
|
||||
myGeneration !== conversationGenerationRef.current;
|
||||
if (!isStaleAtAbort) {
|
||||
setStreaming(false);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
debugLog(
|
||||
"stream",
|
||||
"Pre-stream resume failed, falling back to wait/retry: %s",
|
||||
resumeError instanceof Error
|
||||
? resumeError.message
|
||||
: String(resumeError),
|
||||
);
|
||||
// Fall through to existing wait/retry behavior
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
}
|
||||
|
||||
// If resume succeeded, skip the wait/retry loop
|
||||
if (!preStreamResumeResult) {
|
||||
// Show status message
|
||||
const statusId = uid("status");
|
||||
buffersRef.current.byId.set(statusId, {
|
||||
kind: "status",
|
||||
id: statusId,
|
||||
lines: ["Conversation is busy, waiting and retrying…"],
|
||||
});
|
||||
buffersRef.current.order.push(statusId);
|
||||
refreshDerived();
|
||||
// Remove status message
|
||||
buffersRef.current.byId.delete(statusId);
|
||||
buffersRef.current.order = buffersRef.current.order.filter(
|
||||
(id) => id !== statusId,
|
||||
);
|
||||
refreshDerived();
|
||||
|
||||
// Wait with abort checking (same pattern as LLM API error retry)
|
||||
let cancelled = false;
|
||||
const startTime = Date.now();
|
||||
while (Date.now() - startTime < retryDelayMs) {
|
||||
if (
|
||||
abortControllerRef.current?.signal.aborted ||
|
||||
userCancelledRef.current
|
||||
) {
|
||||
cancelled = true;
|
||||
break;
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
}
|
||||
|
||||
// Remove status message
|
||||
buffersRef.current.byId.delete(statusId);
|
||||
buffersRef.current.order = buffersRef.current.order.filter(
|
||||
(id) => id !== statusId,
|
||||
);
|
||||
refreshDerived();
|
||||
|
||||
if (!cancelled) {
|
||||
// Reset interrupted flag so retry stream chunks are processed
|
||||
buffersRef.current.interrupted = false;
|
||||
restorePinnedPermissionMode();
|
||||
continue;
|
||||
}
|
||||
if (!cancelled) {
|
||||
// Reset interrupted flag so retry stream chunks are processed
|
||||
buffersRef.current.interrupted = false;
|
||||
restorePinnedPermissionMode();
|
||||
continue;
|
||||
}
|
||||
// User pressed ESC - fall through to error handling
|
||||
}
|
||||
@@ -4457,10 +4381,7 @@ export default function App({
|
||||
}
|
||||
|
||||
// Not a recoverable desync - re-throw to outer catch
|
||||
// (unless pre-stream resume already succeeded)
|
||||
if (!preStreamResumeResult) {
|
||||
throw preStreamError;
|
||||
}
|
||||
throw preStreamError;
|
||||
}
|
||||
|
||||
// Check again after network call - user may have pressed Escape during sendMessageStream
|
||||
@@ -4566,24 +4487,19 @@ export default function App({
|
||||
contextTrackerRef.current.currentTurnId++;
|
||||
}
|
||||
|
||||
const drainResult = preStreamResumeResult
|
||||
? preStreamResumeResult
|
||||
: (() => {
|
||||
if (!stream) {
|
||||
throw new Error(
|
||||
"Expected stream when pre-stream resume did not succeed",
|
||||
);
|
||||
}
|
||||
return drainStreamWithResume(
|
||||
stream,
|
||||
buffersRef.current,
|
||||
refreshDerivedThrottled,
|
||||
signal, // Use captured signal, not ref (which may be nulled by handleInterrupt)
|
||||
handleFirstMessage,
|
||||
undefined,
|
||||
contextTrackerRef.current,
|
||||
);
|
||||
})();
|
||||
if (!stream) {
|
||||
throw new Error("Expected stream to be set before drain");
|
||||
}
|
||||
const drainResult = drainStreamWithResume(
|
||||
stream,
|
||||
buffersRef.current,
|
||||
refreshDerivedThrottled,
|
||||
signal, // Use captured signal, not ref (which may be nulled by handleInterrupt)
|
||||
handleFirstMessage,
|
||||
undefined,
|
||||
contextTrackerRef.current,
|
||||
highestSeqIdSeen,
|
||||
);
|
||||
|
||||
const {
|
||||
stopReason,
|
||||
@@ -4591,9 +4507,14 @@ export default function App({
|
||||
approvals,
|
||||
apiDurationMs,
|
||||
lastRunId,
|
||||
lastSeqId,
|
||||
fallbackError,
|
||||
} = await drainResult;
|
||||
|
||||
if (lastSeqId != null) {
|
||||
highestSeqIdSeen = Math.max(highestSeqIdSeen ?? 0, lastSeqId);
|
||||
}
|
||||
|
||||
// Update currentRunId for error reporting in catch block
|
||||
currentRunId = lastRunId ?? undefined;
|
||||
// Expose to statusline
|
||||
@@ -4736,6 +4657,7 @@ export default function App({
|
||||
refreshDerived();
|
||||
|
||||
// Continue conversation with the hook feedback
|
||||
const hookMessageOtid = randomUUID();
|
||||
setTimeout(() => {
|
||||
processConversation(
|
||||
[
|
||||
@@ -4743,6 +4665,7 @@ export default function App({
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: hookMessage,
|
||||
otid: hookMessageOtid,
|
||||
},
|
||||
],
|
||||
{ allowReentry: true },
|
||||
@@ -5235,11 +5158,16 @@ export default function App({
|
||||
toolResultsInFlightRef.current = true;
|
||||
await processConversation(
|
||||
[
|
||||
{ type: "approval", approvals: allResults },
|
||||
{
|
||||
type: "approval",
|
||||
approvals: allResults,
|
||||
otid: randomUUID(),
|
||||
},
|
||||
{
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: queuedContentParts,
|
||||
otid: randomUUID(),
|
||||
},
|
||||
],
|
||||
{ allowReentry: true },
|
||||
@@ -5285,6 +5213,7 @@ export default function App({
|
||||
{
|
||||
type: "approval",
|
||||
approvals: allResults,
|
||||
otid: randomUUID(),
|
||||
},
|
||||
],
|
||||
{ allowReentry: true },
|
||||
@@ -5555,6 +5484,7 @@ export default function App({
|
||||
type: "message" as const,
|
||||
role: "system" as const,
|
||||
content: `<system-reminder>The previous response was empty. Please provide a response with either text content or a tool call.</system-reminder>`,
|
||||
otid: randomUUID(),
|
||||
},
|
||||
];
|
||||
}
|
||||
@@ -5578,6 +5508,8 @@ export default function App({
|
||||
);
|
||||
refreshDerived();
|
||||
|
||||
// Empty-response retry starts a new request/run, so refresh OTIDs.
|
||||
refreshCurrentInputOtids();
|
||||
buffersRef.current.interrupted = false;
|
||||
continue;
|
||||
}
|
||||
@@ -5592,6 +5524,9 @@ export default function App({
|
||||
retriable &&
|
||||
llmApiErrorRetriesRef.current < LLM_API_ERROR_MAX_RETRIES
|
||||
) {
|
||||
// Do NOT replay the same run for terminal post-stream errors
|
||||
// (e.g. llm_api_error). A retry should create a new run.
|
||||
|
||||
llmApiErrorRetriesRef.current += 1;
|
||||
const attempt = llmApiErrorRetriesRef.current;
|
||||
const delayMs = getRetryDelayMs({
|
||||
@@ -5659,9 +5594,11 @@ export default function App({
|
||||
}
|
||||
|
||||
if (!cancelled) {
|
||||
// Post-stream retry is a new request/run, so refresh OTIDs.
|
||||
refreshCurrentInputOtids();
|
||||
// Reset interrupted flag so retry stream chunks are processed
|
||||
buffersRef.current.interrupted = false;
|
||||
// Retry by continuing the while loop (same currentInput)
|
||||
// Retry by continuing the while loop with fresh OTIDs.
|
||||
continue;
|
||||
}
|
||||
// User pressed ESC - fall through to error handling
|
||||
@@ -6924,7 +6861,7 @@ export default function App({
|
||||
if (allResults.length > 0) {
|
||||
toolResultsInFlightRef.current = true;
|
||||
await processConversation([
|
||||
{ type: "approval", approvals: allResults },
|
||||
{ type: "approval", approvals: allResults, otid: randomUUID() },
|
||||
]);
|
||||
toolResultsInFlightRef.current = false;
|
||||
|
||||
@@ -8028,6 +7965,7 @@ export default function App({
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: buildTextParts(systemMsg, prompt),
|
||||
otid: randomUUID(),
|
||||
},
|
||||
]);
|
||||
} else {
|
||||
@@ -9449,6 +9387,7 @@ export default function App({
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: buildTextParts(skillMessage),
|
||||
otid: randomUUID(),
|
||||
},
|
||||
]);
|
||||
} catch (error) {
|
||||
@@ -9512,6 +9451,7 @@ export default function App({
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: rememberParts,
|
||||
otid: randomUUID(),
|
||||
},
|
||||
]);
|
||||
} catch (error) {
|
||||
@@ -9668,6 +9608,52 @@ export default function App({
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: buildTextParts(initMessage),
|
||||
otid: randomUUID(),
|
||||
},
|
||||
]);
|
||||
} catch (error) {
|
||||
const errorDetails = formatErrorDetails(error, agentId);
|
||||
cmd.fail(`Failed: ${errorDetails}`);
|
||||
} finally {
|
||||
setCommandRunning(false);
|
||||
}
|
||||
return { submitted: true };
|
||||
}
|
||||
|
||||
// Special handling for /doctor command
|
||||
if (trimmed === "/doctor") {
|
||||
const cmd = commandRunner.start(msg, "Gathering project context...");
|
||||
|
||||
const approvalCheck = await checkPendingApprovalsForSlashCommand();
|
||||
if (approvalCheck.blocked) {
|
||||
cmd.fail(
|
||||
"Pending approval(s). Resolve approvals before running /doctor.",
|
||||
);
|
||||
return { submitted: false };
|
||||
}
|
||||
|
||||
setCommandRunning(true);
|
||||
try {
|
||||
cmd.finish(
|
||||
"Running memory doctor... I'll ask a few questions to refine memory structure.",
|
||||
true,
|
||||
);
|
||||
|
||||
const { context: gitContext } = gatherInitGitContext();
|
||||
const memoryDir = settingsManager.isMemfsEnabled(agentId)
|
||||
? getMemoryFilesystemRoot(agentId)
|
||||
: undefined;
|
||||
|
||||
const doctorMessage = buildDoctorMessage({
|
||||
gitContext,
|
||||
memoryDir,
|
||||
});
|
||||
|
||||
await processConversation([
|
||||
{
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: buildTextParts(doctorMessage),
|
||||
},
|
||||
]);
|
||||
} catch (error) {
|
||||
@@ -9739,6 +9725,7 @@ export default function App({
|
||||
content: buildTextParts(
|
||||
`${SYSTEM_REMINDER_OPEN}\n${prompt}\n${SYSTEM_REMINDER_CLOSE}`,
|
||||
),
|
||||
otid: randomUUID(),
|
||||
},
|
||||
]);
|
||||
} catch (error) {
|
||||
@@ -10357,12 +10344,14 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
{
|
||||
type: "approval",
|
||||
approvals: recoveryApprovalResults,
|
||||
otid: randomUUID(),
|
||||
},
|
||||
{
|
||||
type: "message",
|
||||
role: "user",
|
||||
content:
|
||||
messageContent as unknown as MessageCreate["content"],
|
||||
otid: randomUUID(),
|
||||
},
|
||||
];
|
||||
|
||||
@@ -10638,6 +10627,7 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: messageContent as unknown as MessageCreate["content"],
|
||||
otid: randomUUID(),
|
||||
});
|
||||
|
||||
await processConversation(initialInput, {
|
||||
@@ -10707,7 +10697,8 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
!anySelectorOpen && // Don't dequeue while a selector/overlay is open
|
||||
!waitingForQueueCancelRef.current && // Don't dequeue while waiting for cancel
|
||||
!userCancelledRef.current && // Don't dequeue if user just cancelled
|
||||
!abortControllerRef.current // Don't dequeue while processConversation is still active
|
||||
!abortControllerRef.current && // Don't dequeue while processConversation is still active
|
||||
!dequeueInFlightRef.current // Don't dequeue while previous dequeue submit is still in flight
|
||||
) {
|
||||
// consumeItems(n) fires onDequeued → setQueueDisplay(prev => prev.slice(n)).
|
||||
const batch = tuiQueueRef.current?.consumeItems(queueLen);
|
||||
@@ -10737,7 +10728,16 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
|
||||
// Submit via normal flow — overrideContentPartsRef carries rich content parts.
|
||||
overrideContentPartsRef.current = queuedContentParts;
|
||||
onSubmitRef.current(concatenatedMessage);
|
||||
// Lock prevents re-entrant dequeue if deps churn before processConversation
|
||||
// sets abortControllerRef (which is the normal long-term gate).
|
||||
dequeueInFlightRef.current = true;
|
||||
void onSubmitRef.current(concatenatedMessage).finally(() => {
|
||||
dequeueInFlightRef.current = false;
|
||||
// If more items arrived while in-flight, bump epoch so the effect re-runs.
|
||||
if ((tuiQueueRef.current?.length ?? 0) > 0) {
|
||||
setDequeueEpoch((e) => e + 1);
|
||||
}
|
||||
});
|
||||
} else if (hasAnythingQueued) {
|
||||
// Log why dequeue was blocked (useful for debugging stuck queues)
|
||||
debugLog(
|
||||
@@ -10986,6 +10986,7 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
type: "message",
|
||||
role: "user",
|
||||
content: buildQueuedContentParts(queuedItemsToAppend),
|
||||
otid: randomUUID(),
|
||||
});
|
||||
refreshDerived();
|
||||
} else if (hadNotifications) {
|
||||
@@ -11255,6 +11256,7 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
{
|
||||
type: "approval",
|
||||
approvals: allResults as ApprovalResult[],
|
||||
otid: randomUUID(),
|
||||
},
|
||||
]);
|
||||
} finally {
|
||||
|
||||
@@ -40,6 +40,15 @@ export const commands: Record<string, Command> = {
|
||||
return "Initializing memory...";
|
||||
},
|
||||
},
|
||||
"/doctor": {
|
||||
desc: "Audit and refine your memory structure",
|
||||
order: 12.1,
|
||||
noArgs: true,
|
||||
handler: () => {
|
||||
// Handled specially in App.tsx to send doctor prompt
|
||||
return "Running memory doctor...";
|
||||
},
|
||||
},
|
||||
"/remember": {
|
||||
desc: "Remember something from the conversation (/remember [instructions])",
|
||||
order: 13,
|
||||
|
||||
@@ -202,7 +202,7 @@ const DynamicPreview: React.FC<DynamicPreviewProps> = ({
|
||||
);
|
||||
}
|
||||
|
||||
if (t === "apply_patch" || t === "applypatch") {
|
||||
if (t === "apply_patch" || t === "applypatch" || t === "memory_apply_patch") {
|
||||
const inputVal = parsedArgs?.input;
|
||||
if (typeof inputVal === "string") {
|
||||
const operations = parsePatchOperations(inputVal);
|
||||
@@ -683,7 +683,12 @@ export const ApprovalDialog = memo(function ApprovalDialog({
|
||||
|
||||
// For Patch tools - parse hunks directly (patches ARE diffs, no need to recompute)
|
||||
const t = approvalRequest.toolName.toLowerCase();
|
||||
if ((t === "apply_patch" || t === "applypatch") && parsedArgs?.input) {
|
||||
if (
|
||||
(t === "apply_patch" ||
|
||||
t === "applypatch" ||
|
||||
t === "memory_apply_patch") &&
|
||||
parsedArgs?.input
|
||||
) {
|
||||
const operations = parsePatchOperations(parsedArgs.input as string);
|
||||
for (const op of operations) {
|
||||
const key = `${toolCallId}:${op.path}`;
|
||||
@@ -747,7 +752,11 @@ export const ApprovalDialog = memo(function ApprovalDialog({
|
||||
if (!approvalRequest) return "";
|
||||
const t = approvalRequest.toolName.toLowerCase();
|
||||
// For patch tools, determine header from operation type
|
||||
if (t === "apply_patch" || t === "applypatch") {
|
||||
if (
|
||||
t === "apply_patch" ||
|
||||
t === "applypatch" ||
|
||||
t === "memory_apply_patch"
|
||||
) {
|
||||
if (parsedArgs?.input && typeof parsedArgs.input === "string") {
|
||||
const operations = parsePatchOperations(parsedArgs.input);
|
||||
if (operations.length > 0) {
|
||||
@@ -819,7 +828,12 @@ export const ApprovalDialog = memo(function ApprovalDialog({
|
||||
}
|
||||
}
|
||||
// For patch tools, show file path(s) being modified
|
||||
if ((t === "apply_patch" || t === "applypatch") && parsedArgs.input) {
|
||||
if (
|
||||
(t === "apply_patch" ||
|
||||
t === "applypatch" ||
|
||||
t === "memory_apply_patch") &&
|
||||
parsedArgs.input
|
||||
) {
|
||||
const operations = parsePatchOperations(parsedArgs.input as string);
|
||||
if (operations.length > 0) {
|
||||
const { relative } = require("node:path");
|
||||
@@ -958,6 +972,7 @@ function getHeaderLabel(toolName: string): string {
|
||||
if (t === "list_dir") return "List Files";
|
||||
if (t === "grep_files") return "Search in Files";
|
||||
if (t === "apply_patch") return "Apply Patch";
|
||||
if (t === "memory_apply_patch") return "Memory Patch";
|
||||
if (t === "update_plan") return "Plan update";
|
||||
// Codex toolset (PascalCase → lowercased)
|
||||
if (t === "shellcommand") return "Shell command";
|
||||
|
||||
@@ -161,7 +161,8 @@ export const ApprovalPreview = memo(
|
||||
toolName === "str_replace_editor" ||
|
||||
toolName === "str_replace_based_edit_tool" ||
|
||||
toolName === "apply_patch" ||
|
||||
toolName === "ApplyPatch"
|
||||
toolName === "ApplyPatch" ||
|
||||
toolName === "memory_apply_patch"
|
||||
) {
|
||||
const headerText = getFileEditHeader(toolName, toolArgs);
|
||||
|
||||
@@ -171,7 +172,9 @@ export const ApprovalPreview = memo(
|
||||
// Handle patch tools (can have multiple files)
|
||||
if (
|
||||
args.input &&
|
||||
(toolName === "apply_patch" || toolName === "ApplyPatch")
|
||||
(toolName === "apply_patch" ||
|
||||
toolName === "ApplyPatch" ||
|
||||
toolName === "memory_apply_patch")
|
||||
) {
|
||||
const operations = parsePatchOperations(args.input);
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { Box } from "ink";
|
||||
import { memo } from "react";
|
||||
import { INTERRUPTED_BY_USER } from "../../constants";
|
||||
import { clipToolReturn } from "../../tools/manager";
|
||||
import type { StreamingState } from "../helpers/accumulator";
|
||||
import { useTerminalWidth } from "../hooks/useTerminalWidth";
|
||||
import { BlinkDot } from "./BlinkDot.js";
|
||||
@@ -90,7 +91,9 @@ export const BashCommandMessage = memo(
|
||||
<Text>{" ⎿ "}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1} width={Math.max(0, columns - 5)}>
|
||||
<MarkdownDisplay text={line.output.replace(/\n+$/, "")} />
|
||||
<MarkdownDisplay
|
||||
text={clipToolReturn(line.output).replace(/\n+$/, "")}
|
||||
/>
|
||||
</Box>
|
||||
</Box>
|
||||
)}
|
||||
|
||||
@@ -37,13 +37,15 @@ interface PreviewLine {
|
||||
// Enriched conversation with message data
|
||||
interface EnrichedConversation {
|
||||
conversation: Conversation;
|
||||
previewLines: PreviewLine[]; // Last 1-3 user/assistant messages
|
||||
lastActiveAt: string | null;
|
||||
messageCount: number;
|
||||
previewLines: PreviewLine[] | null; // null = not yet loaded
|
||||
lastActiveAt: string | null; // Falls back to updated_at until enriched
|
||||
messageCount: number; // -1 = unknown/loading
|
||||
enriched: boolean; // Whether message data has been fetched
|
||||
}
|
||||
|
||||
const DISPLAY_PAGE_SIZE = 3;
|
||||
const FETCH_PAGE_SIZE = 20;
|
||||
const ENRICH_MESSAGE_LIMIT = 20; // Same as original fetch limit
|
||||
|
||||
/**
|
||||
* Format a relative time string from a date
|
||||
@@ -217,12 +219,52 @@ export function ConversationSelector({
|
||||
const [loadingMore, setLoadingMore] = useState(false);
|
||||
const [hasMore, setHasMore] = useState(true);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const [enriching, setEnriching] = useState(false);
|
||||
|
||||
// Selection state
|
||||
const [selectedIndex, setSelectedIndex] = useState(0);
|
||||
const [page, setPage] = useState(0);
|
||||
|
||||
// Load conversations and enrich with message data
|
||||
// Enrich a single conversation with message data, updating state in-place
|
||||
const enrichConversation = useCallback(
|
||||
async (client: Letta, convId: string) => {
|
||||
try {
|
||||
const messages = await client.conversations.messages.list(convId, {
|
||||
limit: ENRICH_MESSAGE_LIMIT,
|
||||
order: "desc",
|
||||
});
|
||||
const chronological = [...messages.getPaginatedItems()].reverse();
|
||||
const stats = getMessageStats(chronological);
|
||||
setConversations((prev) =>
|
||||
prev.map((c) =>
|
||||
c.conversation.id === convId
|
||||
? {
|
||||
...c,
|
||||
previewLines: stats.previewLines,
|
||||
lastActiveAt: stats.lastActiveAt || c.lastActiveAt,
|
||||
messageCount: stats.messageCount,
|
||||
enriched: true,
|
||||
}
|
||||
: c,
|
||||
),
|
||||
);
|
||||
return stats.messageCount;
|
||||
} catch {
|
||||
// Mark as enriched even on error so we don't retry
|
||||
setConversations((prev) =>
|
||||
prev.map((c) =>
|
||||
c.conversation.id === convId
|
||||
? { ...c, previewLines: [], enriched: true }
|
||||
: c,
|
||||
),
|
||||
);
|
||||
return -1;
|
||||
}
|
||||
},
|
||||
[],
|
||||
);
|
||||
|
||||
// Load conversations — shows list immediately, enriches progressively
|
||||
const loadConversations = useCallback(
|
||||
async (afterCursor?: string | null) => {
|
||||
const isLoadingMore = !!afterCursor;
|
||||
@@ -237,38 +279,8 @@ export function ConversationSelector({
|
||||
const client = clientRef.current || (await getClient());
|
||||
clientRef.current = client;
|
||||
|
||||
// Fetch default conversation data (agent's primary message history)
|
||||
// Only fetch on initial load (not when paginating)
|
||||
let defaultConversation: EnrichedConversation | null = null;
|
||||
if (!afterCursor) {
|
||||
try {
|
||||
const defaultMessages = await client.agents.messages.list(agentId, {
|
||||
conversation_id: "default",
|
||||
limit: 20,
|
||||
order: "desc",
|
||||
});
|
||||
const defaultMsgItems = defaultMessages.getPaginatedItems();
|
||||
if (defaultMsgItems.length > 0) {
|
||||
const defaultStats = getMessageStats(
|
||||
[...defaultMsgItems].reverse(),
|
||||
);
|
||||
defaultConversation = {
|
||||
conversation: {
|
||||
id: "default",
|
||||
agent_id: agentId,
|
||||
created_at: new Date().toISOString(),
|
||||
} as Conversation,
|
||||
previewLines: defaultStats.previewLines,
|
||||
lastActiveAt: defaultStats.lastActiveAt,
|
||||
messageCount: defaultStats.messageCount,
|
||||
};
|
||||
}
|
||||
} catch {
|
||||
// If we can't fetch default messages, just skip showing it
|
||||
}
|
||||
}
|
||||
|
||||
const result = await client.conversations.list({
|
||||
// Phase 1: Fetch conversation list + default messages in parallel
|
||||
const conversationListPromise = client.conversations.list({
|
||||
agent_id: agentId,
|
||||
limit: FETCH_PAGE_SIZE,
|
||||
...(afterCursor && { after: afterCursor }),
|
||||
@@ -276,73 +288,120 @@ export function ConversationSelector({
|
||||
order_by: "last_run_completion",
|
||||
});
|
||||
|
||||
// Enrich conversations with message data in parallel
|
||||
const enrichedConversations = await Promise.all(
|
||||
result.map(async (conv) => {
|
||||
try {
|
||||
// Fetch recent messages to get stats (desc order = newest first)
|
||||
const messages = await client.conversations.messages.list(
|
||||
conv.id,
|
||||
{ limit: 20, order: "desc" },
|
||||
);
|
||||
// Reverse to chronological for getMessageStats (expects oldest-first)
|
||||
const chronologicalMessages = [
|
||||
...messages.getPaginatedItems(),
|
||||
].reverse();
|
||||
const stats = getMessageStats(chronologicalMessages);
|
||||
return {
|
||||
conversation: conv,
|
||||
previewLines: stats.previewLines,
|
||||
lastActiveAt: stats.lastActiveAt,
|
||||
messageCount: stats.messageCount,
|
||||
};
|
||||
} catch {
|
||||
// If we fail to fetch messages, show conversation anyway with -1 to indicate error
|
||||
return {
|
||||
conversation: conv,
|
||||
previewLines: [],
|
||||
lastActiveAt: null,
|
||||
messageCount: -1, // Unknown, don't filter out
|
||||
};
|
||||
}
|
||||
}),
|
||||
);
|
||||
// Fetch default conversation in parallel (not sequentially before)
|
||||
const defaultPromise: Promise<EnrichedConversation | null> =
|
||||
!afterCursor
|
||||
? client.agents.messages
|
||||
.list(agentId, {
|
||||
conversation_id: "default",
|
||||
limit: ENRICH_MESSAGE_LIMIT,
|
||||
order: "desc",
|
||||
})
|
||||
.then((msgs) => {
|
||||
const items = msgs.getPaginatedItems();
|
||||
if (items.length === 0) return null;
|
||||
const stats = getMessageStats([...items].reverse());
|
||||
return {
|
||||
conversation: {
|
||||
id: "default",
|
||||
agent_id: agentId,
|
||||
created_at: new Date().toISOString(),
|
||||
} as Conversation,
|
||||
previewLines: stats.previewLines,
|
||||
lastActiveAt: stats.lastActiveAt,
|
||||
messageCount: stats.messageCount,
|
||||
enriched: true,
|
||||
};
|
||||
})
|
||||
.catch(() => null)
|
||||
: Promise.resolve(null);
|
||||
|
||||
// Filter out empty conversations (messageCount === 0)
|
||||
// Keep conversations with messageCount > 0 or -1 (error/unknown)
|
||||
const nonEmptyConversations = enrichedConversations.filter(
|
||||
(c) => c.messageCount !== 0,
|
||||
);
|
||||
const [result, defaultConversation] = await Promise.all([
|
||||
conversationListPromise,
|
||||
defaultPromise,
|
||||
]);
|
||||
|
||||
// Build unenriched conversation list using data already on the object
|
||||
const unenrichedList: EnrichedConversation[] = result.map((conv) => ({
|
||||
conversation: conv,
|
||||
previewLines: null, // Not loaded yet
|
||||
lastActiveAt: conv.updated_at ?? conv.created_at ?? null,
|
||||
messageCount: -1, // Unknown until enriched
|
||||
enriched: false,
|
||||
}));
|
||||
|
||||
// Don't filter yet — we'll remove empties after enrichment confirms messageCount
|
||||
const nonEmptyList = unenrichedList;
|
||||
|
||||
const newCursor =
|
||||
result.length === FETCH_PAGE_SIZE
|
||||
? (result[result.length - 1]?.id ?? null)
|
||||
: null;
|
||||
|
||||
// Phase 1 render: show conversation list immediately
|
||||
if (isLoadingMore) {
|
||||
setConversations((prev) => [...prev, ...nonEmptyConversations]);
|
||||
setConversations((prev) => [...prev, ...nonEmptyList]);
|
||||
} else {
|
||||
// Prepend default conversation to the list (if it has messages)
|
||||
const allConversations = defaultConversation
|
||||
? [defaultConversation, ...nonEmptyConversations]
|
||||
: nonEmptyConversations;
|
||||
? [defaultConversation, ...nonEmptyList]
|
||||
: nonEmptyList;
|
||||
setConversations(allConversations);
|
||||
setPage(0);
|
||||
setSelectedIndex(0);
|
||||
}
|
||||
setCursor(newCursor);
|
||||
setHasMore(newCursor !== null);
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : String(err));
|
||||
} finally {
|
||||
|
||||
// Flip loading off now — list is visible, enrichment happens in background
|
||||
if (isLoadingMore) {
|
||||
setLoadingMore(false);
|
||||
} else {
|
||||
setLoading(false);
|
||||
}
|
||||
|
||||
// Phase 2: enrich visible page first, then rest in background
|
||||
setEnriching(true);
|
||||
const toEnrich = nonEmptyList.filter((c) => !c.enriched);
|
||||
const firstPageItems = toEnrich.slice(0, DISPLAY_PAGE_SIZE);
|
||||
const restItems = toEnrich.slice(DISPLAY_PAGE_SIZE);
|
||||
|
||||
// Enrich first page in parallel
|
||||
const firstPageResults = await Promise.all(
|
||||
firstPageItems.map((c) =>
|
||||
enrichConversation(client, c.conversation.id),
|
||||
),
|
||||
);
|
||||
|
||||
// Remove conversations that turned out empty after enrichment
|
||||
const emptyConvIds = new Set(
|
||||
firstPageItems
|
||||
.filter((_, i) => firstPageResults[i] === 0)
|
||||
.map((c) => c.conversation.id),
|
||||
);
|
||||
if (emptyConvIds.size > 0) {
|
||||
setConversations((prev) =>
|
||||
prev.filter((c) => !emptyConvIds.has(c.conversation.id)),
|
||||
);
|
||||
}
|
||||
|
||||
// Enrich remaining conversations one by one in background
|
||||
for (const item of restItems) {
|
||||
const count = await enrichConversation(client, item.conversation.id);
|
||||
if (count === 0) {
|
||||
setConversations((prev) =>
|
||||
prev.filter((c) => c.conversation.id !== item.conversation.id),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
setEnriching(false);
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : String(err));
|
||||
setLoading(false);
|
||||
setLoadingMore(false);
|
||||
}
|
||||
},
|
||||
[agentId],
|
||||
[agentId, enrichConversation],
|
||||
);
|
||||
|
||||
// Initial load
|
||||
@@ -350,6 +409,23 @@ export function ConversationSelector({
|
||||
loadConversations();
|
||||
}, [loadConversations]);
|
||||
|
||||
// Re-enrich when page changes (prioritize newly visible unenriched items)
|
||||
useEffect(() => {
|
||||
const client = clientRef.current;
|
||||
if (!client || loading) return;
|
||||
|
||||
const visibleItems = conversations.slice(
|
||||
page * DISPLAY_PAGE_SIZE,
|
||||
(page + 1) * DISPLAY_PAGE_SIZE,
|
||||
);
|
||||
const unenriched = visibleItems.filter((c) => !c.enriched);
|
||||
if (unenriched.length === 0) return;
|
||||
|
||||
for (const item of unenriched) {
|
||||
enrichConversation(client, item.conversation.id);
|
||||
}
|
||||
}, [page, loading, conversations, enrichConversation]);
|
||||
|
||||
// Pagination calculations
|
||||
const totalPages = Math.ceil(conversations.length / DISPLAY_PAGE_SIZE);
|
||||
const startIndex = page * DISPLAY_PAGE_SIZE;
|
||||
@@ -441,7 +517,19 @@ export function ConversationSelector({
|
||||
const bracket = <Text dimColor>{"⎿ "}</Text>;
|
||||
const indent = " "; // Same width as "⎿ " for alignment
|
||||
|
||||
// Priority 2: Preview lines with emoji prefixes
|
||||
// Still loading message data
|
||||
if (previewLines === null) {
|
||||
return (
|
||||
<Box flexDirection="row" marginLeft={2}>
|
||||
{bracket}
|
||||
<Text dimColor italic>
|
||||
Loading preview...
|
||||
</Text>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
// Has preview lines from messages
|
||||
if (previewLines.length > 0) {
|
||||
return (
|
||||
<>
|
||||
@@ -558,6 +646,15 @@ export function ConversationSelector({
|
||||
</Box>
|
||||
)}
|
||||
|
||||
{/* Enriching indicator */}
|
||||
{!loading && enriching && (
|
||||
<Box marginBottom={1}>
|
||||
<Text dimColor italic>
|
||||
Loading previews...
|
||||
</Text>
|
||||
</Box>
|
||||
)}
|
||||
|
||||
{/* Empty state */}
|
||||
{!loading && !error && conversations.length === 0 && (
|
||||
<Box flexDirection="column">
|
||||
|
||||
@@ -59,7 +59,7 @@ function getHeaderText(fileEdit: FileEditInfo): string {
|
||||
const t = fileEdit.toolName.toLowerCase();
|
||||
|
||||
// Handle patch tools (multi-file)
|
||||
if (t === "apply_patch" || t === "applypatch") {
|
||||
if (t === "apply_patch" || t === "applypatch" || t === "memory_apply_patch") {
|
||||
if (fileEdit.patchInput) {
|
||||
const operations = parsePatchOperations(fileEdit.patchInput);
|
||||
if (operations.length > 1) {
|
||||
|
||||
@@ -22,12 +22,11 @@ export function MemoryDiffRenderer({
|
||||
try {
|
||||
const args = JSON.parse(argsText);
|
||||
|
||||
// Handle memory_apply_patch tool (unified diff format)
|
||||
// Handle memory_apply_patch tool (codex-style apply_patch input)
|
||||
if (toolName === "memory_apply_patch") {
|
||||
const label = args.label || "unknown";
|
||||
const patch = args.patch || "";
|
||||
const patch = typeof args.input === "string" ? args.input : "";
|
||||
return (
|
||||
<PatchDiffRenderer label={label} patch={patch} columns={columns} />
|
||||
<PatchDiffRenderer label="memory" patch={patch} columns={columns} />
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -128,6 +128,22 @@ export function MessageSearch({
|
||||
// Cache results per query+mode+range combination to avoid re-fetching
|
||||
const resultsCache = useRef<Map<string, MessageSearchResponse>>(new Map());
|
||||
|
||||
// Warm tpuf cache on mount (fire-and-forget)
|
||||
useEffect(() => {
|
||||
const warmCache = async () => {
|
||||
try {
|
||||
const client = await getClient();
|
||||
await client.post("/v1/messages/search", {
|
||||
body: {},
|
||||
query: { warm_only: true },
|
||||
});
|
||||
} catch {
|
||||
// Silently ignore - cache warm is best-effort
|
||||
}
|
||||
};
|
||||
void warmCache();
|
||||
}, []);
|
||||
|
||||
// Get cache key for a specific query+mode+range combination
|
||||
const getCacheKey = useCallback(
|
||||
(query: string, mode: SearchMode, range: SearchRange) => {
|
||||
|
||||
@@ -37,6 +37,13 @@ function looksLikeMojibake(value: string): boolean {
|
||||
}
|
||||
}
|
||||
|
||||
// A lone multi-byte lead with even one valid continuation is mojibake
|
||||
if (byte >= 0xc2 && byte <= 0xf4) {
|
||||
if (i + 1 < value.length && isContinuationByte(value.charCodeAt(i + 1))) {
|
||||
sawUtf8Sequence = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (byte >= 0xf0 && byte <= 0xf4) {
|
||||
if (
|
||||
i + 3 < value.length &&
|
||||
|
||||
@@ -383,6 +383,11 @@ export function markCurrentLineAsFinished(b: Buffers) {
|
||||
* @param setInterruptedFlag - Whether to set the interrupted flag (default true).
|
||||
* Pass false when clearing stale tool calls at stream startup to avoid race conditions
|
||||
* with concurrent processConversation calls reading the flag.
|
||||
* @param reason - Why the cancellation is happening.
|
||||
* @param skipMarkCurrentLine - When true, do NOT call markCurrentLineAsFinished.
|
||||
* Use this when a stream resume will follow: the resume stream will finalize the
|
||||
* streaming line with its full text, so prematurely marking it finished would
|
||||
* cause it to be committed to static with truncated content.
|
||||
* @returns true if any tool calls were marked as cancelled
|
||||
*/
|
||||
export type CancelReason =
|
||||
@@ -402,6 +407,7 @@ export function markIncompleteToolsAsCancelled(
|
||||
b: Buffers,
|
||||
setInterruptedFlag = true,
|
||||
reason: CancelReason = "internal_cancel",
|
||||
skipMarkCurrentLine = false,
|
||||
): boolean {
|
||||
// Mark buffer as interrupted to skip stale throttled refreshes
|
||||
// (only when actually interrupting, not when clearing stale state at startup)
|
||||
@@ -422,8 +428,12 @@ export function markIncompleteToolsAsCancelled(
|
||||
anyToolsCancelled = true;
|
||||
}
|
||||
}
|
||||
// Also mark any streaming assistant/reasoning lines as finished
|
||||
markCurrentLineAsFinished(b);
|
||||
// Mark any streaming assistant/reasoning lines as finished, unless a resume
|
||||
// is about to follow (in which case the resume stream will finalize it with
|
||||
// full text — marking it now would freeze truncated content in static).
|
||||
if (!skipMarkCurrentLine) {
|
||||
markCurrentLineAsFinished(b);
|
||||
}
|
||||
return anyToolsCancelled;
|
||||
}
|
||||
|
||||
|
||||
@@ -291,3 +291,27 @@ Once invoked, follow the instructions from the \`initializing-memory\` skill to
|
||||
${args.gitContext}
|
||||
${SYSTEM_REMINDER_CLOSE}`;
|
||||
}
|
||||
|
||||
/** Message for the primary agent via processConversation when user runs /doctor. */
|
||||
export function buildDoctorMessage(args: {
|
||||
gitContext: string;
|
||||
memoryDir?: string;
|
||||
}): string {
|
||||
const memfsSection = args.memoryDir
|
||||
? `\n## Memory filesystem\n\nMemory filesystem is enabled. Memory directory: \`${args.memoryDir}\`\n`
|
||||
: "";
|
||||
|
||||
return `${SYSTEM_REMINDER_OPEN}
|
||||
The user has requested a memory structure check via /doctor.
|
||||
${memfsSection}
|
||||
## 1. Invoke the context_doctor skill
|
||||
|
||||
Use the \`Skill\` tool with \`skill: "context_doctor"\` to load guidance for memory structure refinement.
|
||||
|
||||
## 2. Follow the skill instructions
|
||||
|
||||
Once invoked, follow the instructions from the \`context_doctor\` skill.
|
||||
|
||||
${args.gitContext}
|
||||
${SYSTEM_REMINDER_CLOSE}`;
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import {
|
||||
import { homedir, tmpdir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
import { MEMORY_SYSTEM_DIR } from "../../agent/memoryFilesystem";
|
||||
import { getDirectoryLimits } from "../../utils/directoryLimits";
|
||||
import { parseFrontmatter } from "../../utils/frontmatter";
|
||||
import { type Line, linesToTranscript } from "./accumulator";
|
||||
|
||||
@@ -192,24 +193,96 @@ function buildParentMemoryTree(files: ParentMemoryFile[]): string {
|
||||
},
|
||||
);
|
||||
|
||||
const lines: string[] = ["/memory/"];
|
||||
const limits = getDirectoryLimits();
|
||||
const maxLines = Math.max(2, limits.memfsTreeMaxLines);
|
||||
const maxChars = Math.max(128, limits.memfsTreeMaxChars);
|
||||
const maxChildrenPerDir = Math.max(1, limits.memfsTreeMaxChildrenPerDir);
|
||||
|
||||
const render = (node: TreeNode, prefix: string) => {
|
||||
const entries = sortedEntries(node);
|
||||
for (const [index, [name, child]] of entries.entries()) {
|
||||
const isLast = index === entries.length - 1;
|
||||
const branch = isLast ? "└──" : "├──";
|
||||
const suffix = child.isFile ? "" : "/";
|
||||
const description = child.description ? ` (${child.description})` : "";
|
||||
lines.push(`${prefix}${branch} ${name}${suffix}${description}`);
|
||||
const rootLine = "/memory/";
|
||||
const lines: string[] = [rootLine];
|
||||
let totalChars = rootLine.length;
|
||||
|
||||
const countTreeEntries = (node: TreeNode): number => {
|
||||
let total = 0;
|
||||
for (const [, child] of node.children) {
|
||||
total += 1;
|
||||
if (child.children.size > 0) {
|
||||
const nextPrefix = `${prefix}${isLast ? " " : "│ "}`;
|
||||
render(child, nextPrefix);
|
||||
total += countTreeEntries(child);
|
||||
}
|
||||
}
|
||||
return total;
|
||||
};
|
||||
|
||||
render(root, "");
|
||||
const canAppendLine = (line: string): boolean => {
|
||||
const nextLineCount = lines.length + 1;
|
||||
const nextCharCount = totalChars + 1 + line.length;
|
||||
return nextLineCount <= maxLines && nextCharCount <= maxChars;
|
||||
};
|
||||
|
||||
const render = (node: TreeNode, prefix: string): boolean => {
|
||||
const entries = sortedEntries(node);
|
||||
const visibleEntries = entries.slice(0, maxChildrenPerDir);
|
||||
const omittedEntries = Math.max(0, entries.length - visibleEntries.length);
|
||||
|
||||
const renderItems: Array<
|
||||
| { kind: "entry"; name: string; child: TreeNode }
|
||||
| { kind: "omitted"; omittedCount: number }
|
||||
> = visibleEntries.map(([name, child]) => ({
|
||||
kind: "entry",
|
||||
name,
|
||||
child,
|
||||
}));
|
||||
|
||||
if (omittedEntries > 0) {
|
||||
renderItems.push({ kind: "omitted", omittedCount: omittedEntries });
|
||||
}
|
||||
|
||||
for (const [index, item] of renderItems.entries()) {
|
||||
const isLast = index === renderItems.length - 1;
|
||||
const branch = isLast ? "└──" : "├──";
|
||||
const line =
|
||||
item.kind === "entry"
|
||||
? `${prefix}${branch} ${item.name}${item.child.isFile ? "" : "/"}${item.child.description ? ` (${item.child.description})` : ""}`
|
||||
: `${prefix}${branch} … (${item.omittedCount.toLocaleString()} more entries)`;
|
||||
|
||||
if (!canAppendLine(line)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
lines.push(line);
|
||||
totalChars += 1 + line.length;
|
||||
|
||||
if (item.kind === "entry" && item.child.children.size > 0) {
|
||||
const nextPrefix = `${prefix}${isLast ? " " : "│ "}`;
|
||||
if (!render(item.child, nextPrefix)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
const totalEntries = countTreeEntries(root);
|
||||
const fullyRendered = render(root, "");
|
||||
|
||||
if (!fullyRendered) {
|
||||
while (lines.length > 1) {
|
||||
const shownEntries = Math.max(0, lines.length - 1);
|
||||
const omittedEntries = Math.max(1, totalEntries - shownEntries);
|
||||
const notice = `[Tree truncated: showing ${shownEntries.toLocaleString()} of ${totalEntries.toLocaleString()} entries. ${omittedEntries.toLocaleString()} omitted.]`;
|
||||
|
||||
if (canAppendLine(notice)) {
|
||||
lines.push(notice);
|
||||
break;
|
||||
}
|
||||
|
||||
const removed = lines.pop();
|
||||
if (removed) {
|
||||
totalChars -= 1 + removed.length;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lines.join("\n");
|
||||
}
|
||||
|
||||
@@ -212,12 +212,14 @@ export async function drainStream(
|
||||
onFirstMessage?: () => void,
|
||||
onChunkProcessed?: DrainStreamHook,
|
||||
contextTracker?: ContextTracker,
|
||||
seenSeqIdThreshold?: number | null,
|
||||
isResumeStream?: boolean,
|
||||
): Promise<DrainResult> {
|
||||
const startTime = performance.now();
|
||||
const requestStartTime = getStreamRequestStartTime(stream) ?? startTime;
|
||||
let hasLoggedTTFT = false;
|
||||
|
||||
const streamProcessor = new StreamProcessor();
|
||||
const streamProcessor = new StreamProcessor(seenSeqIdThreshold ?? null);
|
||||
|
||||
let stopReason: StopReasonType | null = null;
|
||||
let hasCalledFirstMessage = false;
|
||||
@@ -258,8 +260,6 @@ export async function drainStream(
|
||||
|
||||
try {
|
||||
for await (const chunk of stream) {
|
||||
// console.log("chunk", chunk);
|
||||
|
||||
// Check if abort generation changed (handleInterrupt ran while we were waiting)
|
||||
// This catches cases where the abort signal might not propagate correctly
|
||||
if ((buffers.abortGeneration || 0) !== startAbortGen) {
|
||||
@@ -385,7 +385,11 @@ export async function drainStream(
|
||||
// Preserve a stop reason already parsed from stream chunks (e.g. llm_api_error)
|
||||
// and only fall back to generic "error" when none is available.
|
||||
stopReason = streamProcessor.stopReason || "error";
|
||||
markIncompleteToolsAsCancelled(buffers, true, "stream_error");
|
||||
// skipMarkCurrentLine=true: if a resume follows, the resume stream will
|
||||
// finalize the streaming line with full text. Marking it finished now would
|
||||
// commit truncated content to static (emittedIdsRef) before resume can append.
|
||||
// drainStreamWithResume calls markCurrentLineAsFinished if no resume happens.
|
||||
markIncompleteToolsAsCancelled(buffers, true, "stream_error", true);
|
||||
queueMicrotask(refresh);
|
||||
} finally {
|
||||
// Persist chunk log to disk (one write per stream, not per chunk)
|
||||
@@ -445,7 +449,13 @@ export async function drainStream(
|
||||
const approval: ApprovalRequest | null = approvals[0] || null;
|
||||
streamProcessor.pendingApprovals.clear();
|
||||
|
||||
if (stopReason === "requires_approval" && approvals.length === 0) {
|
||||
if (
|
||||
stopReason === "requires_approval" &&
|
||||
approvals.length === 0 &&
|
||||
!isResumeStream
|
||||
) {
|
||||
// On resume streams, approval chunks are before starting_after and won't be replayed.
|
||||
// drainStreamWithResume carries them over from the original drain — this is expected.
|
||||
debugWarn(
|
||||
"drainStream",
|
||||
"No approvals collected despite requires_approval stop reason",
|
||||
@@ -488,6 +498,7 @@ export async function drainStreamWithResume(
|
||||
onFirstMessage?: () => void,
|
||||
onChunkProcessed?: DrainStreamHook,
|
||||
contextTracker?: ContextTracker,
|
||||
seenSeqIdThreshold?: number | null,
|
||||
): Promise<DrainResult> {
|
||||
const overallStartTime = performance.now();
|
||||
const streamRequestContext = getStreamRequestContext(stream);
|
||||
@@ -509,6 +520,7 @@ export async function drainStreamWithResume(
|
||||
onFirstMessage,
|
||||
onChunkProcessed,
|
||||
contextTracker,
|
||||
seenSeqIdThreshold,
|
||||
);
|
||||
|
||||
let runIdToResume = result.lastRunId ?? null;
|
||||
@@ -574,6 +586,9 @@ export async function drainStreamWithResume(
|
||||
abortSignal &&
|
||||
!abortSignal.aborted
|
||||
) {
|
||||
// Resume path: markCurrentLineAsFinished was skipped in the catch block.
|
||||
// If resume fails below, we call it in the catch. If no resume condition is
|
||||
// met (else branch), we call it there instead.
|
||||
// Preserve original state in case resume needs to merge or fails
|
||||
const originalFallbackError = result.fallbackError;
|
||||
const originalApprovals = result.approvals;
|
||||
@@ -639,6 +654,8 @@ export async function drainStreamWithResume(
|
||||
undefined,
|
||||
onChunkProcessed,
|
||||
contextTracker,
|
||||
seenSeqIdThreshold,
|
||||
true, // isResumeStream
|
||||
);
|
||||
|
||||
// Use the resume result (should have proper stop_reason now)
|
||||
@@ -663,8 +680,9 @@ export async function drainStreamWithResume(
|
||||
result.approval = originalApproval;
|
||||
}
|
||||
} catch (resumeError) {
|
||||
// Resume failed - stick with the error stop_reason
|
||||
// Restore the original stream error for display
|
||||
// Resume failed - finalize the streaming line now (skipped in catch block above)
|
||||
markCurrentLineAsFinished(buffers);
|
||||
// Stick with the error stop_reason and restore the original stream error for display
|
||||
result.fallbackError = originalFallbackError;
|
||||
|
||||
const resumeErrorMsg =
|
||||
@@ -697,6 +715,8 @@ export async function drainStreamWithResume(
|
||||
|
||||
// Only log if we actually skipped for a reason (i.e., we didn't enter the resume branch above)
|
||||
if (skipReasons.length > 0) {
|
||||
// No resume — finalize the streaming line now (was skipped in catch block)
|
||||
markCurrentLineAsFinished(buffers);
|
||||
debugLog(
|
||||
"stream",
|
||||
"Mid-stream resume skipped: %s",
|
||||
|
||||
@@ -41,9 +41,21 @@ export class StreamProcessor {
|
||||
public lastSeqId: number | null = null;
|
||||
public stopReason: StopReasonType | null = null;
|
||||
|
||||
constructor(private readonly seenSeqIdThreshold: number | null = null) {}
|
||||
|
||||
processChunk(chunk: LettaStreamingResponse): ChunkProcessingResult {
|
||||
let errorInfo: ErrorInfo | undefined;
|
||||
let updatedApproval: ApprovalRequest | undefined;
|
||||
|
||||
if (
|
||||
"seq_id" in chunk &&
|
||||
chunk.seq_id != null &&
|
||||
this.seenSeqIdThreshold != null &&
|
||||
chunk.seq_id <= this.seenSeqIdThreshold
|
||||
) {
|
||||
return { shouldOutput: false };
|
||||
}
|
||||
|
||||
// Store the run_id (for error reporting) and seq_id (for stream resumption)
|
||||
// Capture run_id even if seq_id is missing - we need it for error details
|
||||
if ("run_id" in chunk && chunk.run_id) {
|
||||
|
||||
@@ -35,6 +35,7 @@ export function getDisplayToolName(rawName: string): string {
|
||||
if (rawName === "list_dir") return "LS";
|
||||
if (rawName === "grep_files") return "Search";
|
||||
if (rawName === "apply_patch") return "Patch";
|
||||
if (rawName === "memory_apply_patch") return "Memory Patch";
|
||||
|
||||
// Codex toolset (PascalCase)
|
||||
if (rawName === "UpdatePlan") return "Planning";
|
||||
|
||||
@@ -1316,6 +1316,7 @@ export async function handleHeadlessCommand(
|
||||
const approvalInput: ApprovalCreate = {
|
||||
type: "approval",
|
||||
approvals: executedResults as ApprovalResult[],
|
||||
otid: randomUUID(),
|
||||
};
|
||||
|
||||
// Inject queued skill content as user message parts (LET-7353)
|
||||
@@ -1335,6 +1336,7 @@ export async function handleHeadlessCommand(
|
||||
type: "text" as const,
|
||||
text: sc.content,
|
||||
})),
|
||||
otid: randomUUID(),
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1462,8 +1464,16 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
{
|
||||
role: "user",
|
||||
content: contentParts,
|
||||
otid: randomUUID(),
|
||||
},
|
||||
];
|
||||
const refreshCurrentInputOtids = () => {
|
||||
// Terminal stop-reason retries are NEW requests and must not reuse OTIDs.
|
||||
currentInput = currentInput.map((item) => ({
|
||||
...item,
|
||||
otid: randomUUID(),
|
||||
}));
|
||||
};
|
||||
|
||||
// Track lastRunId outside the while loop so it's available in catch block
|
||||
let lastKnownRunId: string | null = null;
|
||||
@@ -1514,6 +1524,7 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
type: "text" as const,
|
||||
text: sc.content,
|
||||
})),
|
||||
otid: randomUUID(),
|
||||
},
|
||||
];
|
||||
}
|
||||
@@ -1569,12 +1580,11 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check for 409 "conversation busy" error - retry once with delay
|
||||
// TODO: Add pre-stream resume logic for parity with App.tsx.
|
||||
// Before waiting, attempt to discover the in-flight run via
|
||||
// discoverFallbackRunIdWithTimeout() and resume its stream with
|
||||
// client.runs.messages.stream() + drainStream(). See App.tsx
|
||||
// retry_conversation_busy handler for reference implementation.
|
||||
// Check for 409 "conversation busy" - wait and retry.
|
||||
// Stream resume is not attempted here: without OTID validation we
|
||||
// cannot confirm the in-flight run belongs to this request (e.g. two
|
||||
// terminals on the same agent). App.tsx handles resume with proper
|
||||
// context via discoverFallbackRunIdWithTimeout.
|
||||
if (preStreamAction === "retry_conversation_busy") {
|
||||
conversationBusyRetries += 1;
|
||||
const retryDelayMs = getRetryDelayMs({
|
||||
@@ -1582,11 +1592,10 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
attempt: conversationBusyRetries,
|
||||
});
|
||||
|
||||
// Emit retry message for stream-json mode
|
||||
if (outputFormat === "stream-json") {
|
||||
const retryMsg: RetryMessage = {
|
||||
type: "retry",
|
||||
reason: "error", // 409 conversation busy is a pre-stream error
|
||||
reason: "error",
|
||||
attempt: conversationBusyRetries,
|
||||
max_attempts: CONVERSATION_BUSY_MAX_RETRIES,
|
||||
delay_ms: retryDelayMs,
|
||||
@@ -1600,7 +1609,6 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
);
|
||||
}
|
||||
|
||||
// Wait before retry
|
||||
await new Promise((resolve) => setTimeout(resolve, retryDelayMs));
|
||||
continue;
|
||||
}
|
||||
@@ -1918,12 +1926,12 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
);
|
||||
|
||||
// Send all results in one batch
|
||||
currentInput = [
|
||||
{
|
||||
type: "approval",
|
||||
approvals: executedResults as ApprovalResult[],
|
||||
},
|
||||
];
|
||||
const approvalInputWithOtid = {
|
||||
type: "approval" as const,
|
||||
approvals: executedResults as ApprovalResult[],
|
||||
otid: randomUUID(),
|
||||
};
|
||||
currentInput = [approvalInputWithOtid];
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -1979,6 +1987,8 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
// Exponential backoff before retrying the same input
|
||||
await new Promise((resolve) => setTimeout(resolve, delayMs));
|
||||
|
||||
// Post-stream retry creates a new run/request.
|
||||
refreshCurrentInputOtids();
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@@ -2092,6 +2102,7 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
const nudgeMessage: MessageCreate = {
|
||||
role: "system",
|
||||
content: `<system-reminder>The previous response was empty. Please provide a response with either text content or a tool call.</system-reminder>`,
|
||||
otid: randomUUID(),
|
||||
};
|
||||
currentInput = [...currentInput, nudgeMessage];
|
||||
}
|
||||
@@ -2115,6 +2126,8 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
}
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, delayMs));
|
||||
// Empty-response retry creates a new run/request.
|
||||
refreshCurrentInputOtids();
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -2148,6 +2161,8 @@ ${SYSTEM_REMINDER_CLOSE}
|
||||
}
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, delayMs));
|
||||
// Post-stream retry creates a new run/request.
|
||||
refreshCurrentInputOtids();
|
||||
continue;
|
||||
}
|
||||
} catch (_e) {
|
||||
@@ -2630,6 +2645,7 @@ async function runBidirectionalMode(
|
||||
const approvalInput: ApprovalCreate = {
|
||||
type: "approval",
|
||||
approvals: executedResults as ApprovalResult[],
|
||||
otid: randomUUID(),
|
||||
};
|
||||
|
||||
const approvalMessages: Array<
|
||||
@@ -2649,6 +2665,7 @@ async function runBidirectionalMode(
|
||||
type: "text" as const,
|
||||
text: sc.content,
|
||||
})),
|
||||
otid: randomUUID(),
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -3051,6 +3068,7 @@ async function runBidirectionalMode(
|
||||
const approvalInput: ApprovalCreate = {
|
||||
type: "approval",
|
||||
approvals: executedResults as ApprovalResult[],
|
||||
otid: randomUUID(),
|
||||
};
|
||||
const approvalStream = await sendMessageStream(
|
||||
targetConversationId,
|
||||
@@ -3778,12 +3796,12 @@ async function runBidirectionalMode(
|
||||
);
|
||||
|
||||
// Send approval results back to continue
|
||||
currentInput = [
|
||||
{
|
||||
type: "approval",
|
||||
approvals: executedResults,
|
||||
} as unknown as MessageCreate,
|
||||
];
|
||||
const approvalInputWithOtid = {
|
||||
type: "approval" as const,
|
||||
approvals: executedResults,
|
||||
otid: randomUUID(),
|
||||
};
|
||||
currentInput = [approvalInputWithOtid as unknown as MessageCreate];
|
||||
|
||||
// Continue the loop to process the next stream
|
||||
continue;
|
||||
|
||||
@@ -189,6 +189,16 @@ export async function computeDiffPreviews(
|
||||
}
|
||||
// Delete operations don't produce diffs
|
||||
}
|
||||
} else if (toolName === "memory_apply_patch" && toolArgs.input) {
|
||||
const operations = parsePatchOperations(toolArgs.input as string);
|
||||
for (const op of operations) {
|
||||
if (op.kind === "add" || op.kind === "update") {
|
||||
const result = parsePatchToAdvancedDiff(op.patchLines, op.path);
|
||||
if (result) {
|
||||
previews.push(toDiffPreview(result, basename(op.path)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Ignore diff computation errors — return whatever we have so far
|
||||
|
||||
94
src/index.ts
94
src/index.ts
@@ -267,7 +267,7 @@ function getModelForToolLoading(
|
||||
*/
|
||||
async function resolveAgentByName(
|
||||
name: string,
|
||||
): Promise<{ id: string; name: string } | null> {
|
||||
): Promise<{ id: string; name: string; agent: AgentState } | null> {
|
||||
const client = await getClient();
|
||||
|
||||
// Get all pinned agents (local first, then global, deduplicated)
|
||||
@@ -280,7 +280,7 @@ async function resolveAgentByName(
|
||||
}
|
||||
|
||||
// Fetch names for all pinned agents and find matches
|
||||
const matches: { id: string; name: string }[] = [];
|
||||
const matches: { id: string; name: string; agent: AgentState }[] = [];
|
||||
const normalizedSearchName = name.toLowerCase();
|
||||
|
||||
await Promise.all(
|
||||
@@ -288,7 +288,7 @@ async function resolveAgentByName(
|
||||
try {
|
||||
const agent = await client.agents.retrieve(id);
|
||||
if (agent.name?.toLowerCase() === normalizedSearchName) {
|
||||
matches.push({ id, name: agent.name });
|
||||
matches.push({ id, name: agent.name, agent });
|
||||
}
|
||||
} catch {
|
||||
// Agent not found or error, skip
|
||||
@@ -731,6 +731,7 @@ async function main(): Promise<void> {
|
||||
}
|
||||
|
||||
// Validate --name flag
|
||||
let nameResolvedAgent: AgentState | null = null;
|
||||
if (specifiedAgentName) {
|
||||
if (specifiedAgentId) {
|
||||
console.error("Error: --name cannot be used with --agent");
|
||||
@@ -848,6 +849,7 @@ async function main(): Promise<void> {
|
||||
process.exit(1);
|
||||
}
|
||||
specifiedAgentId = resolved.id;
|
||||
nameResolvedAgent = resolved.agent;
|
||||
}
|
||||
|
||||
// Set tool filter if provided (controls which tools are loaded)
|
||||
@@ -950,6 +952,7 @@ async function main(): Promise<void> {
|
||||
initBlocks,
|
||||
baseTools,
|
||||
agentIdArg,
|
||||
preResolvedAgent,
|
||||
model,
|
||||
systemPromptPreset,
|
||||
toolset,
|
||||
@@ -961,6 +964,7 @@ async function main(): Promise<void> {
|
||||
initBlocks?: string[];
|
||||
baseTools?: string[];
|
||||
agentIdArg: string | null;
|
||||
preResolvedAgent?: AgentState | null;
|
||||
model?: string;
|
||||
systemPromptPreset?: string;
|
||||
toolset?: "auto" | "codex" | "default" | "gemini";
|
||||
@@ -995,7 +999,7 @@ async function main(): Promise<void> {
|
||||
>(null);
|
||||
// Cache agent object from Phase 1 validation to avoid redundant re-fetch in Phase 2
|
||||
const [validatedAgent, setValidatedAgent] = useState<AgentState | null>(
|
||||
null,
|
||||
preResolvedAgent ?? null,
|
||||
);
|
||||
// Track agent and conversation for conversation selector (--resume flag)
|
||||
const [resumeAgentId, setResumeAgentId] = useState<string | null>(null);
|
||||
@@ -1274,31 +1278,50 @@ async function main(): Promise<void> {
|
||||
// Step 1: Check local project LRU (session helpers centralize legacy fallback)
|
||||
// Cache the retrieved agent to avoid redundant re-fetch in init()
|
||||
const localAgentId = settingsManager.getLocalLastAgentId(process.cwd());
|
||||
const globalAgentId = settingsManager.getGlobalLastAgentId();
|
||||
|
||||
// Fetch local + global LRU agents in parallel
|
||||
let localAgentExists = false;
|
||||
let globalAgentExists = false;
|
||||
let cachedAgent: AgentState | null = null;
|
||||
if (localAgentId) {
|
||||
try {
|
||||
cachedAgent = await client.agents.retrieve(localAgentId);
|
||||
|
||||
if (globalAgentId && globalAgentId === localAgentId) {
|
||||
// Same agent — only need one fetch
|
||||
if (localAgentId) {
|
||||
try {
|
||||
cachedAgent = await client.agents.retrieve(localAgentId);
|
||||
localAgentExists = true;
|
||||
} catch {
|
||||
setFailedAgentMessage(
|
||||
`Unable to locate recently used agent ${localAgentId}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
globalAgentExists = localAgentExists;
|
||||
} else {
|
||||
// Different agents — fetch in parallel
|
||||
const [localResult, globalResult] = await Promise.allSettled([
|
||||
localAgentId
|
||||
? client.agents.retrieve(localAgentId)
|
||||
: Promise.reject(new Error("no local")),
|
||||
globalAgentId
|
||||
? client.agents.retrieve(globalAgentId)
|
||||
: Promise.reject(new Error("no global")),
|
||||
]);
|
||||
|
||||
if (localResult.status === "fulfilled") {
|
||||
localAgentExists = true;
|
||||
} catch {
|
||||
cachedAgent = localResult.value;
|
||||
} else if (localAgentId) {
|
||||
setFailedAgentMessage(
|
||||
`Unable to locate recently used agent ${localAgentId}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Check global LRU (covers directory-switching case)
|
||||
const globalAgentId = settingsManager.getGlobalLastAgentId();
|
||||
let globalAgentExists = false;
|
||||
if (globalAgentId && globalAgentId !== localAgentId) {
|
||||
try {
|
||||
cachedAgent = await client.agents.retrieve(globalAgentId);
|
||||
if (globalResult.status === "fulfilled") {
|
||||
globalAgentExists = true;
|
||||
} catch {
|
||||
// Global agent doesn't exist either
|
||||
cachedAgent = globalResult.value;
|
||||
}
|
||||
} else if (globalAgentId && globalAgentId === localAgentId) {
|
||||
globalAgentExists = localAgentExists;
|
||||
}
|
||||
|
||||
// Step 3: Resolve startup target using pure decision logic
|
||||
@@ -1379,11 +1402,17 @@ async function main(): Promise<void> {
|
||||
|
||||
// Priority 1: --agent flag
|
||||
if (agentIdArg) {
|
||||
try {
|
||||
await client.agents.retrieve(agentIdArg);
|
||||
// Use cached agent from name resolution if available
|
||||
if (validatedAgent && validatedAgent.id === agentIdArg) {
|
||||
resumingAgentId = agentIdArg;
|
||||
} catch {
|
||||
// Agent doesn't exist, will create new later
|
||||
} else {
|
||||
try {
|
||||
const agent = await client.agents.retrieve(agentIdArg);
|
||||
setValidatedAgent(agent);
|
||||
resumingAgentId = agentIdArg;
|
||||
} catch {
|
||||
// Agent doesn't exist, will create new later
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1396,13 +1425,19 @@ async function main(): Promise<void> {
|
||||
// This takes precedence over stale LRU since user explicitly chose it
|
||||
const shouldCreateNew = forceNew || userRequestedNewAgent;
|
||||
if (!resumingAgentId && !shouldCreateNew && selectedGlobalAgentId) {
|
||||
try {
|
||||
await client.agents.retrieve(selectedGlobalAgentId);
|
||||
// Use cached agent from Phase 1 validation if available
|
||||
if (validatedAgent && validatedAgent.id === selectedGlobalAgentId) {
|
||||
resumingAgentId = selectedGlobalAgentId;
|
||||
} catch {
|
||||
// Selected agent doesn't exist - show selector again
|
||||
setLoadingState("selecting_global");
|
||||
return;
|
||||
} else {
|
||||
try {
|
||||
const agent = await client.agents.retrieve(selectedGlobalAgentId);
|
||||
setValidatedAgent(agent);
|
||||
resumingAgentId = selectedGlobalAgentId;
|
||||
} catch {
|
||||
// Selected agent doesn't exist - show selector again
|
||||
setLoadingState("selecting_global");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2032,6 +2067,7 @@ async function main(): Promise<void> {
|
||||
initBlocks: initBlocks,
|
||||
baseTools: baseTools,
|
||||
agentIdArg: specifiedAgentId,
|
||||
preResolvedAgent: nameResolvedAgent,
|
||||
model: specifiedModel,
|
||||
systemPromptPreset: systemPromptPreset,
|
||||
toolset: specifiedToolset as
|
||||
|
||||
@@ -282,6 +282,7 @@ class PermissionModeManager {
|
||||
"MultiEdit",
|
||||
"NotebookEdit",
|
||||
"apply_patch",
|
||||
"memory_apply_patch",
|
||||
"replace",
|
||||
"write_file",
|
||||
].includes(toolName)
|
||||
@@ -299,6 +300,9 @@ class PermissionModeManager {
|
||||
"Grep",
|
||||
"NotebookRead",
|
||||
"TodoWrite",
|
||||
// Image tools (read-only)
|
||||
"ViewImage",
|
||||
"view_image",
|
||||
// Plan mode tools (must allow exit!)
|
||||
"ExitPlanMode",
|
||||
"exit_plan_mode",
|
||||
@@ -339,6 +343,7 @@ class PermissionModeManager {
|
||||
// Codex toolset (snake_case and PascalCase)
|
||||
"apply_patch",
|
||||
"ApplyPatch",
|
||||
"memory_apply_patch",
|
||||
// Gemini toolset (snake_case and PascalCase)
|
||||
"write_file_gemini",
|
||||
"WriteFileGemini",
|
||||
@@ -362,7 +367,9 @@ class PermissionModeManager {
|
||||
|
||||
// ApplyPatch/apply_patch: extract all file directives.
|
||||
if (
|
||||
(toolName === "ApplyPatch" || toolName === "apply_patch") &&
|
||||
(toolName === "ApplyPatch" ||
|
||||
toolName === "apply_patch" ||
|
||||
toolName === "memory_apply_patch") &&
|
||||
toolArgs?.input
|
||||
) {
|
||||
const input = toolArgs.input as string;
|
||||
|
||||
132
src/skills/builtin/context_doctor/SKILL.md
Normal file
132
src/skills/builtin/context_doctor/SKILL.md
Normal file
@@ -0,0 +1,132 @@
|
||||
---
|
||||
name: Context Doctor
|
||||
id: context_doctor
|
||||
description: Identify and repair degradation in system prompt, external memory, and skills preventing you from following instructions or remembering information as well as you should.
|
||||
---
|
||||
|
||||
# Context Doctor
|
||||
Your context is managed by yourself, along with additional memory subagents. Your context includes:
|
||||
- Your system prompt and instructions (contained in `system/`)
|
||||
- Your external memory
|
||||
- Your skills (procedural memory)
|
||||
|
||||
Over time, context can degrade — bloat and poor prompt quality erode your ability to remember the right things and follow instructions properly. This skill helps you identify issues with your context and repair them collaboratively with the user.
|
||||
|
||||
## Operating Procedure
|
||||
|
||||
### Step 1: Identifying and resolving context issues
|
||||
Explore your memory files to identify issues. Consider what is confusing about your own prompts and context, and resolve the issues.
|
||||
|
||||
Below are additional common issues with context and how they can be resolved:
|
||||
|
||||
### Context quality
|
||||
Your system prompt and memory filesystem should be well structured and clear.
|
||||
|
||||
**Questions to ask**:
|
||||
- Is my system prompt clear and well formatted?
|
||||
- Are there wasteful or unnecessary tokens in my prompts?
|
||||
- Do I know when to load which files in my memory filesystem?
|
||||
|
||||
#### System prompt bloat
|
||||
Prompts that are compiled as part of the system prompt (contained in `system/`) should only take up about 10% of the total context size, though this is a recommendation, not a hard requirement. Usually this means about 15-20k tokens.
|
||||
|
||||
Use the following script to evaluate the token usage of the system prompt:
|
||||
```bash
|
||||
bun scripts/estimate_system_tokens.ts --memory-dir "$MEMORY_DIR"
|
||||
```
|
||||
|
||||
**Questions to ask**:
|
||||
- Do all these tokens need to be passed to the LLM on every turn, or can they be retrieved when needed through being part of external memory of my conversation history?
|
||||
- Do any of these prompts confuse or distract me?
|
||||
- Am I able to effectively follow critical instructions (e.g. persona information, user preferences) given the current prompt structure and contents?
|
||||
|
||||
**Solution**: Reduce the size of the system prompt if needed:
|
||||
- Move files outside of `system/` so they are no longer part of the system prompt
|
||||
- Compact information to be more information dense or eliminate redundancy
|
||||
- Leverage progressive disclosure: move some context outside of `system/` and reference it to pull in dynamically
|
||||
|
||||
**Scope**: You may refine, tighten, and restructure prompts to improve clarity and adherence — but do not change the intended semantics. The goal is better signal, not different behavior.
|
||||
- Do not alter persona-defining content (who you are, how you communicate)
|
||||
- Do not remove or change user identity or preferences (e.g. the human's name, their stated goals)
|
||||
- Do not rewrite instructions in ways that shift their meaning — only reduce noise and improve structure
|
||||
|
||||
#### Context redundancy and unclear organization
|
||||
The context in the memory filesystem should have a clear structure, with a well-defined purpose for each file. Memory file descriptions should be precise and non-overlapping. Their contents should be consistent with the description, and have non-overlapping content to other files.
|
||||
|
||||
**Questions to ask**:
|
||||
- Do the descriptions make clear what file is for what?
|
||||
- Do the contents of the file match the descriptions? (you can ask subagents to check)
|
||||
|
||||
**Solution**: Read all memory files (use subagents for efficiency), then:
|
||||
- Consolidate redundant files
|
||||
- Reorganize files and rewrite descriptions to have clear separation of concerns
|
||||
- Avoid duplication by referencing common files from multiple places (e.g. `[[reference/api]]`)
|
||||
- Rewrite unclear or low-quality content
|
||||
|
||||
#### Invalid context format
|
||||
Files in the memory filesystem must follow certain structural requirements:
|
||||
- Must have a `system/persona.md`
|
||||
- Must NOT have overlapping file and folder names (e.g. `system/human.md` and `system/human/identity.md`)
|
||||
- Must follow specification for skills (e.g. `skills/{skill_name}/`) with the format:
|
||||
```
|
||||
skill-name/
|
||||
├── SKILL.md # Required: metadata + instructions
|
||||
├── scripts/ # Optional: executable code
|
||||
├── references/ # Optional: documentation
|
||||
├── assets/ # Optional: templates, resources
|
||||
└── ... # Any additional files or directories
|
||||
```
|
||||
|
||||
**Solution**: Reorganize files to follow the required structure
|
||||
|
||||
### Poor use of progressive disclosure
|
||||
Only critical information should be in the system prompt, since it's passed on every turn. Use progressive disclosure so that context only *sometimes* needed can be dynamically retrieved.
|
||||
|
||||
Files that are outside of `system/` are not part of the system prompt, and must be dynamically loaded. You must index your files to ensure your future self can discover them: for example, make sure that files have informative names and descriptions, or are referenced from parts of your system prompt. Otherwise, you will never discover the external context or make use of it.
|
||||
|
||||
**Solution**:
|
||||
- Reference external skills from the relevant parts of in-context memory:
|
||||
```
|
||||
When running a migration, always use the skill [[skills/db-migrations]]
|
||||
```
|
||||
or external memory files:
|
||||
```
|
||||
Sarah's active projects are: Letta Code [[projects/letta_code.md]] and Letta Cloud [[projects/letta_cloud]]
|
||||
```
|
||||
- Ensure that contents of files match the file name and descriptions
|
||||
- Make sure your future self will be able to find and load external files when needed.
|
||||
|
||||
### Step 2: Implement context fixes
|
||||
Create a plan for what fixes you want to make, then implement them.
|
||||
|
||||
Before moving on, verify:
|
||||
- [ ] System prompt token budget reviewed (target ~10% of context, usually 15-20k tokens)
|
||||
- [ ] No overlapping or redundant files remain
|
||||
- [ ] All file descriptions are unique, accurate, and match their contents
|
||||
- [ ] Moved-out knowledge has references from in-context memory so it can be discovered
|
||||
- [ ] No semantic changes to persona, user identity, or behavioral instructions
|
||||
|
||||
### Step 3: Commit and push
|
||||
Review changes, then commit with a descriptive message:
|
||||
|
||||
```bash
|
||||
cd $MEMORY_DIR
|
||||
git status # Review what changed before staging
|
||||
git add <specific files> # Stage targeted paths — avoid blind `git add -A`
|
||||
git commit --author="<AGENT_NAME> <<ACTUAL_AGENT_ID>@letta.com>" -m "fix(doctor): <summary> 🏥
|
||||
|
||||
<identified issues and implemented solutions>"
|
||||
|
||||
git push
|
||||
```
|
||||
|
||||
### Step 4: Final checklist and message
|
||||
Tell the user what issues you identitied, the fixes you made, the commit you made, and also recommend that they run `/recompile` to apply these changes to the current system prompt.
|
||||
|
||||
Before finishing make sure you:
|
||||
- [ ] Resolved all the identified context issues
|
||||
- [ ] Pushed your changes successfully
|
||||
- [ ] Told the user to run `/recompile` to refresh the system prompt and apply changes
|
||||
|
||||
## Critical information
|
||||
- **Ask the user about their goals for you, not the implementation**: You understand your own context best, and should follow the guidelines in this document. Do NOT ask the user about their structural preferences - the context is for YOU, not them. Ask them how they want YOU to behave or know instead.
|
||||
181
src/skills/builtin/context_doctor/scripts/estimate_system_tokens.ts
Executable file
181
src/skills/builtin/context_doctor/scripts/estimate_system_tokens.ts
Executable file
@@ -0,0 +1,181 @@
|
||||
#!/usr/bin/env bun
|
||||
|
||||
import { existsSync, readdirSync, readFileSync } from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import { getClient } from "../../../../agent/client";
|
||||
import { settingsManager } from "../../../../settings-manager";
|
||||
|
||||
const BYTES_PER_TOKEN = 4;
|
||||
|
||||
type FileEstimate = {
|
||||
path: string;
|
||||
tokens: number;
|
||||
};
|
||||
|
||||
type ParsedArgs = {
|
||||
memoryDir?: string;
|
||||
agentId?: string;
|
||||
top: number;
|
||||
};
|
||||
|
||||
function parseArgs(argv: string[]): ParsedArgs {
|
||||
const parsed: ParsedArgs = { top: 20 };
|
||||
|
||||
for (let i = 0; i < argv.length; i++) {
|
||||
const arg = argv[i];
|
||||
if (arg === "--memory-dir") {
|
||||
parsed.memoryDir = argv[i + 1];
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
if (arg === "--agent-id") {
|
||||
parsed.agentId = argv[i + 1];
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
if (arg === "--top") {
|
||||
const raw = argv[i + 1];
|
||||
const value = Number.parseInt(raw ?? "", 10);
|
||||
if (!Number.isNaN(value) && value >= 0) {
|
||||
parsed.top = value;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function estimateTokens(text: string): number {
|
||||
return Math.ceil(Buffer.byteLength(text, "utf8") / BYTES_PER_TOKEN);
|
||||
}
|
||||
|
||||
function normalizePath(value: string): string {
|
||||
return value.replaceAll("\\", "/");
|
||||
}
|
||||
|
||||
function walkMarkdownFiles(dir: string): string[] {
|
||||
if (!existsSync(dir)) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const out: string[] = [];
|
||||
const entries = readdirSync(dir, { withFileTypes: true });
|
||||
|
||||
for (const entry of entries) {
|
||||
if (entry.name.startsWith(".")) {
|
||||
continue;
|
||||
}
|
||||
const full = join(dir, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
if (entry.name === ".git") {
|
||||
continue;
|
||||
}
|
||||
out.push(...walkMarkdownFiles(full));
|
||||
continue;
|
||||
}
|
||||
if (entry.isFile() && entry.name.endsWith(".md")) {
|
||||
out.push(full);
|
||||
}
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
function inferAgentIdFromMemoryDir(memoryDir: string): string | null {
|
||||
const parts = normalizePath(memoryDir).split("/");
|
||||
for (let i = 0; i < parts.length - 1; i++) {
|
||||
if (parts[i] === "agents" && parts[i + 1]?.startsWith("agent-")) {
|
||||
return parts[i + 1];
|
||||
}
|
||||
}
|
||||
|
||||
const maybe = parts.at(-2);
|
||||
return maybe?.startsWith("agent-") ? maybe : null;
|
||||
}
|
||||
|
||||
async function resolveAgentId(
|
||||
memoryDir: string,
|
||||
cliAgentId?: string,
|
||||
): Promise<string> {
|
||||
if (cliAgentId) {
|
||||
return cliAgentId;
|
||||
}
|
||||
|
||||
if (process.env.AGENT_ID) {
|
||||
return process.env.AGENT_ID;
|
||||
}
|
||||
|
||||
const inferred = inferAgentIdFromMemoryDir(memoryDir);
|
||||
if (inferred) {
|
||||
return inferred;
|
||||
}
|
||||
|
||||
const fromSession = settingsManager.getEffectiveLastAgentId(process.cwd());
|
||||
if (fromSession) {
|
||||
return fromSession;
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
"Unable to resolve agent ID. Pass --agent-id or set AGENT_ID.",
|
||||
);
|
||||
}
|
||||
|
||||
function formatNumber(value: number): string {
|
||||
return value.toLocaleString("en-US");
|
||||
}
|
||||
|
||||
async function main(): Promise<number> {
|
||||
await settingsManager.initialize();
|
||||
|
||||
const args = parseArgs(process.argv.slice(2));
|
||||
const memoryDir = args.memoryDir || process.env.MEMORY_DIR;
|
||||
|
||||
if (!memoryDir) {
|
||||
throw new Error("Missing memory dir. Pass --memory-dir or set MEMORY_DIR.");
|
||||
}
|
||||
|
||||
const systemDir = join(memoryDir, "system");
|
||||
if (!existsSync(systemDir)) {
|
||||
throw new Error(`Missing system directory: ${systemDir}`);
|
||||
}
|
||||
|
||||
const agentId = await resolveAgentId(memoryDir, args.agentId);
|
||||
|
||||
// Use the SDK auth path used by letta-code (OAuth + API key handling via getClient).
|
||||
const client = await getClient();
|
||||
await client.agents.retrieve(agentId);
|
||||
|
||||
const files = walkMarkdownFiles(systemDir).sort();
|
||||
const rows: FileEstimate[] = [];
|
||||
|
||||
for (const filePath of files) {
|
||||
const text = readFileSync(filePath, "utf8");
|
||||
const rel = normalizePath(filePath.slice(memoryDir.length + 1));
|
||||
rows.push({ path: rel, tokens: estimateTokens(text) });
|
||||
}
|
||||
|
||||
const estimatedTotalTokens = rows.reduce((sum, row) => sum + row.tokens, 0);
|
||||
|
||||
console.log("Estimated total tokens");
|
||||
console.log(` ${formatNumber(estimatedTotalTokens)}`);
|
||||
|
||||
console.log("\nPer-file token estimates");
|
||||
console.log(` ${"tokens".padStart(8)} path`);
|
||||
|
||||
const sortedRows = [...rows].sort((a, b) => b.tokens - a.tokens);
|
||||
for (const row of sortedRows.slice(0, Math.max(0, args.top))) {
|
||||
console.log(` ${formatNumber(row.tokens).padStart(8)} ${row.path}`);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
main()
|
||||
.then((code) => {
|
||||
process.exit(code);
|
||||
})
|
||||
.catch((error: unknown) => {
|
||||
console.error(error instanceof Error ? error.message : String(error));
|
||||
process.exit(1);
|
||||
});
|
||||
@@ -16,23 +16,23 @@ If you are running as a background subagent (you cannot use AskUserQuestion):
|
||||
- Use reasonable defaults for all preferences
|
||||
- Any specific overrides will be provided in your initial prompt
|
||||
|
||||
## Your Goal: Explode Into 15-25 Hierarchical Files
|
||||
## Your Goal: Organize Into a Hierarchical Memory Structure
|
||||
|
||||
Your goal is to **explode** memory into a **deeply hierarchical structure of 15-25 small, focused files**.
|
||||
Your goal is to **organize** memory into a **deeply hierarchical structure of small, focused files**.
|
||||
|
||||
### Target Output
|
||||
|
||||
| Metric | Target |
|
||||
|--------|--------|
|
||||
| **Total files** | 15-25 (aim for ~20) |
|
||||
| **Max lines per file** | ~40 lines (split if larger) |
|
||||
| **Hierarchy depth** | 2-3 levels using `/` naming (e.g., `project/tooling/bun.md`) |
|
||||
| **Total files** | Enough focused files to cover distinct concepts without bloating any single file |
|
||||
| **File size** | Keep files concise and split when they become unwieldy |
|
||||
| **Hierarchy depth** | Use nested `/` paths whenever they improve clarity (e.g., `project/tooling/bun.md`) |
|
||||
| **Nesting requirement** | Every new file MUST be nested under a parent using `/` |
|
||||
|
||||
**Anti-patterns to avoid:**
|
||||
- ❌ Ending with only 3-5 large files
|
||||
- ❌ Ending with only a few large files
|
||||
- ❌ Flat naming (all files at top level)
|
||||
- ❌ Mega-files with 10+ sections
|
||||
- ❌ Mega-files with many unrelated sections
|
||||
|
||||
## Memory Filesystem Integration
|
||||
|
||||
@@ -45,27 +45,27 @@ Your memory is a git-backed filesystem at `~/.letta/agents/<agent-id>/`. The act
|
||||
- The filesystem tree (all file paths + metadata) is always visible regardless of location
|
||||
- You can use bash commands (`ls`, `mkdir`, `mv`, `git`) to organize files
|
||||
- You MUST create a **deeply hierarchical file structure** — flat naming is NOT acceptable
|
||||
- **Target: 15-25 files in system/**, with additional reference files outside as needed
|
||||
- Create as many files as needed for clarity in `system/`, with additional reference files outside as needed
|
||||
|
||||
**MANDATORY principles for hierarchical organization:**
|
||||
|
||||
| Requirement | Target |
|
||||
|-------------|--------|
|
||||
| **Total files** | 15-25 files (aim for ~20) |
|
||||
| **Max lines per file** | ~40 lines (split if larger) |
|
||||
| **Hierarchy depth** | 2-3 levels using `/` naming |
|
||||
| **Total files** | Enough focused files to avoid monoliths while staying maintainable |
|
||||
| **File size** | Keep files concise and split when they become unwieldy |
|
||||
| **Hierarchy depth** | Use meaningful nesting with `/` naming where it helps organization |
|
||||
| **Nesting requirement** | EVERY new file MUST use `/` naming (no flat files) |
|
||||
|
||||
**Anti-patterns to avoid:**
|
||||
- ❌ Creating only 3-5 large files
|
||||
- ❌ Creating only a few large files
|
||||
- ❌ Flat naming (all files at top level like `project-commands.md`)
|
||||
- ❌ Mega-files with 10+ sections
|
||||
- ❌ Mega-files with many unrelated sections
|
||||
|
||||
**Rules:**
|
||||
- Use **2-3 levels of nesting** for ALL files (e.g., `project/tooling/bun.md`)
|
||||
- Keep files **focused and small** (~40 lines max per file)
|
||||
- Use clear nested paths for files whenever hierarchy improves discoverability (e.g., `project/tooling/bun.md`)
|
||||
- Keep files **focused and concise**
|
||||
- Use **descriptive paths** that make sense when you see just the filename
|
||||
- Split when a file has **2+ concepts** (be aggressive)
|
||||
- Split when a file starts mixing multiple concepts (be aggressive)
|
||||
|
||||
**Example target structure (what success looks like):**
|
||||
|
||||
@@ -93,7 +93,7 @@ system/
|
||||
└── behavior.md # How to behave
|
||||
```
|
||||
|
||||
This example has **~20 files** with **3 levels of hierarchy**. Your output should look similar.
|
||||
This example is illustrative. Your output should match the project’s actual complexity and the user’s needs.
|
||||
|
||||
This approach makes memory more **scannable**, **maintainable**, and **shareable** with other agents.
|
||||
|
||||
@@ -319,7 +319,7 @@ You should ask these questions at the start (bundle them together in one AskUser
|
||||
2. **Identity**: "Which contributor are you?" (You can often infer this from git logs - e.g., if git shows "cpacker" as a top contributor, ask "Are you cpacker?")
|
||||
3. **Related repos**: "Are there other repositories I should know about and consider in my research?" (e.g., backend monorepo, shared libraries)
|
||||
4. **Historical sessions** (include this question if history data was found in step 2): "I found Claude Code / Codex history on your machine. Should I analyze it to learn your preferences, coding patterns, and project context? This significantly improves how I work with you but uses additional time and tokens." Options: "Yes, analyze history" / "Skip for now". Use "History" as the header.
|
||||
5. **Memory updates**: "How often should I check if I should update my memory?" with options "Frequent (every 3-5 turns)" and "Occasional (every 8-10 turns)". This should be a binary question with "Memory" as the header.
|
||||
5. **Memory updates**: "How often should I check whether to update memory?" with options "Frequent" and "Occasional". This should be a binary question with "Memory" as the header.
|
||||
6. **Communication style**: "Terse or detailed responses?"
|
||||
7. **Any specific rules**: "Rules I should always follow?"
|
||||
|
||||
@@ -364,11 +364,11 @@ mkdir -p ~/.letta/agents/<agent-id>/memory/system/human/prefs
|
||||
- **Every new file MUST use `/` naming** - no flat files allowed
|
||||
- Use `/` for hierarchy: `project/tooling/testing` (not `project-tooling-testing`)
|
||||
- File path determines the memory label: `system/project/overview.md` → label `project/overview`
|
||||
- Keep files small and focused (~40 lines max)
|
||||
- Keep files small and focused
|
||||
- Use **descriptive frontmatter** — the `description` field helps your future self understand each file's purpose
|
||||
|
||||
**Checkpoint before proceeding:**
|
||||
Count your proposed files. **If you have fewer than 15 files, go back and split more aggressively.**
|
||||
Review your proposed files and split further if the structure still feels too flat or monolithic.
|
||||
|
||||
**Benefits:**
|
||||
- More scannable and maintainable
|
||||
@@ -376,17 +376,17 @@ Count your proposed files. **If you have fewer than 15 files, go back and split
|
||||
- Natural progressive disclosure (load parent, then drill into children)
|
||||
- Works like a file system you're familiar with
|
||||
|
||||
### Split Aggressively - Target 15-25 Files
|
||||
### Split Aggressively
|
||||
|
||||
**Don't create monolithic files.** Your goal is **15-25 total files**. Be aggressive about splitting:
|
||||
**Don't create monolithic files.** Be aggressive about splitting when it improves clarity:
|
||||
|
||||
**Split when:**
|
||||
- A file has **40+ lines** (lower threshold than typical)
|
||||
- A file has **2+ distinct concepts** (not 3+, be aggressive)
|
||||
- A file becomes long enough that scanning it slows you down
|
||||
- A file mixes distinct concepts that would be clearer if separated
|
||||
- A section could stand alone as its own file
|
||||
- You can name the extracted content with a clear `/` path
|
||||
|
||||
If a file is getting long (>40 lines), split it:
|
||||
If a file is getting long or conceptually mixed, split it:
|
||||
|
||||
**Without memory filesystem** (flat naming - acceptable but not ideal):
|
||||
- `project-overview`: High-level description, tech stack, repo links
|
||||
@@ -402,7 +402,7 @@ If a file is getting long (>40 lines), split it:
|
||||
- `project/architecture`: Directory structure, key modules
|
||||
- `project/gotchas`: Footguns, things to watch out for
|
||||
- **Must further nest**: `project/tooling/testing`, `project/tooling/linting`, `project/tooling/bun`
|
||||
- **Target 15-25 files total** - if commands is long, split into `project/commands/dev`, `project/commands/build`, etc.
|
||||
- If commands are broad, split into focused files like `project/commands/dev`, `project/commands/build`, etc.
|
||||
|
||||
This makes memory more scannable and easier to update and share with other agents.
|
||||
|
||||
@@ -456,9 +456,9 @@ And add memory files that you think make sense to add (e.g., `project/architectu
|
||||
|
||||
9. **Create/update memory structure** (can happen incrementally alongside steps 7-8):
|
||||
- **With memfs enabled**: Create a deeply hierarchical file structure using bash commands
|
||||
- Use `mkdir -p` to create subdirectories (2-3 levels deep)
|
||||
- Use `mkdir -p` to create subdirectories as needed
|
||||
- Create `.md` files for memory files using `/` naming
|
||||
- **Target 15-25 total files** - be aggressive about splitting
|
||||
- Be aggressive about splitting when it improves clarity
|
||||
- Use nested paths like `project/tooling/testing.md` (never flat like `project-testing.md`)
|
||||
- **Every new file MUST be nested** under a parent using `/`
|
||||
- **Every new file MUST be nested** under a parent using `/`
|
||||
@@ -466,10 +466,9 @@ And add memory files that you think make sense to add (e.g., `project/architectu
|
||||
- **Don't wait until the end** - write findings as you go
|
||||
|
||||
**Checkpoint verification:**
|
||||
- After creating files, count them: `ls ~/.letta/agents/<agent-id>/memory/system/ | wc -l`
|
||||
- **If count < 15, you haven't split enough** - go back and split more
|
||||
- Check maximum depth: `find ~/.letta/agents/<agent-id>/memory/system/ -type f | awk -F/ '{print NF}' | sort -n | tail -1`
|
||||
- **Should be 2-3 levels deep** minimum
|
||||
- Review file count and shape: `find ~/.letta/agents/<agent-id>/memory/system/ -type f | wc -l`
|
||||
- Review hierarchy depth: `find ~/.letta/agents/<agent-id>/memory/system/ -type f | awk -F/ '{print NF}' | sort -n | tail -1`
|
||||
- Verify the structure feels appropriately granular and discoverable for this project
|
||||
|
||||
10. **Organize incrementally**:
|
||||
- Start with a basic structure
|
||||
@@ -487,14 +486,13 @@ And add memory files that you think make sense to add (e.g., `project/architectu
|
||||
|
||||
Before finishing, you MUST do a reflection step. **Your memory files are visible to you in your system prompt right now.** Look at them carefully and ask yourself:
|
||||
|
||||
1. **File count check**:
|
||||
- Count your memory files: `ls ~/.letta/agents/<agent-id>/memory/system/ | wc -l`
|
||||
- **Do you have 15-25 files?** If not, you haven't split enough
|
||||
- Too few files means they're too large - split more aggressively
|
||||
1. **File granularity check**:
|
||||
- Review your memory file set: `find ~/.letta/agents/<agent-id>/memory/system/ -type f | wc -l`
|
||||
- Ask whether any files are still too broad and should be split
|
||||
|
||||
2. **Hierarchy check**:
|
||||
- Are ALL new files using `/` naming? (e.g., `project/tooling/bun.md`)
|
||||
- Do you have 2-3 levels of nesting minimum?
|
||||
- Is the nesting meaningful for this project?
|
||||
- Are there any flat files like `project-commands.md`? **These should be nested**
|
||||
|
||||
3. **Redundancy check**: Are there files with overlapping content? Either literally overlapping (due to errors while editing), or semantically/conceptually overlapping?
|
||||
@@ -545,7 +543,7 @@ cat ~/.letta/agents/<agent-id>/memory/system/persona.md
|
||||
❌ `project_testing.md` (underscore instead of `/`)
|
||||
|
||||
```bash
|
||||
# Create deeply nested directory structure (2-3 levels)
|
||||
# Create a nested directory structure suited to the content
|
||||
mkdir -p ~/.letta/agents/<agent-id>/memory/system/project/{tooling,architecture,conventions}
|
||||
mkdir -p ~/.letta/agents/<agent-id>/memory/system/human/prefs
|
||||
mkdir -p ~/.letta/agents/<agent-id>/memory/system/persona/behavior
|
||||
@@ -584,22 +582,22 @@ mv ~/.letta/agents/<agent-id>/memory/system/project/tooling.md \
|
||||
|
||||
Before you tell the user you're done, confirm:
|
||||
|
||||
- [ ] **File count is 15-25** — Count your files with `ls ~/.letta/agents/<agent-id>/memory/system/ | wc -l`. If < 15, split more.
|
||||
- [ ] **File granularity is appropriate** — verify no files are overly broad and split where useful.
|
||||
- [ ] **All new files use `/` naming** — No flat files like `my_notes.md` or `project-commands.md`
|
||||
- [ ] **Hierarchy is 2-3 levels deep** — e.g., `project/tooling/bun.md`, not just `project.md`
|
||||
- [ ] **No file exceeds ~40 lines** — Split larger files
|
||||
- [ ] **Each file has one concept** — If 2+ topics, split into 2+ files
|
||||
- [ ] **Hierarchy is meaningful** — nested paths should improve discoverability and organization.
|
||||
- [ ] **No file is bloated** — split files that are hard to scan quickly.
|
||||
- [ ] **Each file stays concept-focused** — split files that combine unrelated topics.
|
||||
- [ ] **Every file has real content** — No empty or pointer-only files
|
||||
- [ ] **Verify sync**: After creating files, check they appear in your memory files
|
||||
|
||||
**If you have fewer than 15 files, you haven't split enough. Go back and split more.**
|
||||
**If the structure still feels flat or monolithic, split further until it is clear and maintainable.**
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. **Check memfs status first**: Look for `memory_filesystem` section in your system prompt
|
||||
2. **Start with directories**: Create the directory structure before populating files
|
||||
3. **Use short paths**: Aim for 2-3 levels (e.g., `project/tooling/testing`, not `project/dev/tools/testing/setup`)
|
||||
4. **Keep files focused**: Each file should cover one concept (~40 lines max)
|
||||
3. **Use practical paths**: prefer clear, readable nesting (e.g., `project/tooling/testing`) over unnecessarily deep paths.
|
||||
4. **Keep files focused**: each file should cover a coherent concept and remain easy to scan.
|
||||
5. **Every file should have real content** — no empty or pointer-only files
|
||||
6. **Be aggressive about splitting**: If in doubt, split. Too many small files is better than too few large ones.
|
||||
|
||||
@@ -636,15 +634,11 @@ LINES=$(wc -l < ~/.claude/history.jsonl)
|
||||
CHUNK_SIZE=$(( LINES / NUM_WORKERS + 1 ))
|
||||
split -l $CHUNK_SIZE ~/.claude/history.jsonl "$SPLIT_DIR/claude-"
|
||||
|
||||
# Split Codex history (if it exists and is large enough to warrant splitting)
|
||||
# Split Codex history if it exists
|
||||
if [ -f ~/.codex/history.jsonl ]; then
|
||||
LINES=$(wc -l < ~/.codex/history.jsonl)
|
||||
if [ "$LINES" -gt 100 ]; then
|
||||
CHUNK_SIZE=$(( LINES / NUM_WORKERS + 1 ))
|
||||
split -l $CHUNK_SIZE ~/.codex/history.jsonl "$SPLIT_DIR/codex-"
|
||||
else
|
||||
cp ~/.codex/history.jsonl "$SPLIT_DIR/codex-aa"
|
||||
fi
|
||||
CHUNK_SIZE=$(( LINES / NUM_WORKERS + 1 ))
|
||||
split -l $CHUNK_SIZE ~/.codex/history.jsonl "$SPLIT_DIR/codex-"
|
||||
fi
|
||||
|
||||
# Rename to .jsonl for clarity
|
||||
@@ -702,12 +696,12 @@ After merging, **read every file in `system/`** and apply editorial judgment:
|
||||
|
||||
Workers may have created files that don't fit the ideal hierarchy, or put too much into `system/`. Fix this:
|
||||
|
||||
- Split oversized files (>40 lines) into focused sub-files
|
||||
- Split oversized or conceptually mixed files into focused sub-files
|
||||
- Move reference-quality content (detailed history, background context, evidence trails) to `reference/`
|
||||
- Ensure `system/` contains only what you genuinely need in-context: identity, active preferences, current project context, behavioral rules, gotchas
|
||||
- Merge near-duplicate files that cover the same topic
|
||||
|
||||
**Rule of thumb**: If removing a file from `system/` wouldn't hurt your next 10 responses, it belongs in `reference/`.
|
||||
**Rule of thumb**: If removing a file from `system/` wouldn't materially affect near-term responses, it belongs in `reference/`.
|
||||
|
||||
**3d. Clean up worktrees and branches:**
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ import { createAgentWithBaseToolsRecovery } from "../../agent/create";
|
||||
function missingBaseToolsError(): Error & { status: number } {
|
||||
return Object.assign(
|
||||
new Error(
|
||||
`400 {"detail":"Tools not found by name: {'fetch_webpage', 'memory'}"}`,
|
||||
`400 {"detail":"Tools not found by name: {'fetch_webpage', 'memory_apply_patch'}"}`,
|
||||
),
|
||||
{ status: 400 },
|
||||
);
|
||||
@@ -25,7 +25,7 @@ describe("createAgentWithBaseToolsRecovery", () => {
|
||||
|
||||
const agent = await createAgentWithBaseToolsRecovery(
|
||||
createWithTools,
|
||||
["memory", "web_search", "fetch_webpage"],
|
||||
["memory_apply_patch", "web_search", "fetch_webpage"],
|
||||
addBaseTools,
|
||||
);
|
||||
|
||||
@@ -33,12 +33,12 @@ describe("createAgentWithBaseToolsRecovery", () => {
|
||||
expect(addBaseTools).toHaveBeenCalledTimes(1);
|
||||
expect(createWithTools).toHaveBeenCalledTimes(2);
|
||||
expect(createWithTools.mock.calls[0]?.[0]).toEqual([
|
||||
"memory",
|
||||
"memory_apply_patch",
|
||||
"web_search",
|
||||
"fetch_webpage",
|
||||
]);
|
||||
expect(createWithTools.mock.calls[1]?.[0]).toEqual([
|
||||
"memory",
|
||||
"memory_apply_patch",
|
||||
"web_search",
|
||||
"fetch_webpage",
|
||||
]);
|
||||
@@ -59,7 +59,7 @@ describe("createAgentWithBaseToolsRecovery", () => {
|
||||
|
||||
const agent = await createAgentWithBaseToolsRecovery(
|
||||
createWithTools,
|
||||
["memory", "web_search", "fetch_webpage"],
|
||||
["memory_apply_patch", "web_search", "fetch_webpage"],
|
||||
addBaseTools,
|
||||
);
|
||||
|
||||
|
||||
@@ -53,7 +53,7 @@ function tryCommit(): { success: boolean; output: string } {
|
||||
}
|
||||
|
||||
/** Valid frontmatter for convenience */
|
||||
const VALID_FM = "---\ndescription: Test block\nlimit: 20000\n---\n\n";
|
||||
const VALID_FM = "---\ndescription: Test block\n---\n\n";
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = mkdtempSync(join(tmpdir(), "memgit-test-"));
|
||||
@@ -92,7 +92,7 @@ describe("pre-commit hook: frontmatter required", () => {
|
||||
test("rejects unclosed frontmatter", () => {
|
||||
writeAndStage(
|
||||
"memory/system/broken.md",
|
||||
"---\ndescription: oops\nlimit: 20000\n\nContent without closing ---\n",
|
||||
"---\ndescription: oops\n\nContent without closing ---\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
@@ -102,29 +102,16 @@ describe("pre-commit hook: frontmatter required", () => {
|
||||
|
||||
describe("pre-commit hook: required fields", () => {
|
||||
test("rejects missing description", () => {
|
||||
writeAndStage(
|
||||
"memory/system/bad.md",
|
||||
"---\nlimit: 20000\n---\n\nContent.\n",
|
||||
);
|
||||
writeAndStage("memory/system/bad.md", "---\n---\n\nContent.\n");
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("missing required field 'description'");
|
||||
});
|
||||
|
||||
test("rejects missing limit", () => {
|
||||
writeAndStage(
|
||||
"memory/system/bad.md",
|
||||
"---\ndescription: A block\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("missing required field 'limit'");
|
||||
});
|
||||
|
||||
test("rejects empty description", () => {
|
||||
writeAndStage(
|
||||
"memory/system/bad.md",
|
||||
"---\ndescription:\nlimit: 20000\n---\n\nContent.\n",
|
||||
"---\ndescription:\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
@@ -133,50 +120,10 @@ describe("pre-commit hook: required fields", () => {
|
||||
});
|
||||
|
||||
describe("pre-commit hook: field validation", () => {
|
||||
test("rejects non-integer limit", () => {
|
||||
writeAndStage(
|
||||
"memory/system/bad.md",
|
||||
"---\ndescription: valid\nlimit: abc\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("positive integer");
|
||||
});
|
||||
|
||||
test("rejects zero limit", () => {
|
||||
writeAndStage(
|
||||
"memory/system/bad.md",
|
||||
"---\ndescription: valid\nlimit: 0\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("positive integer");
|
||||
});
|
||||
|
||||
test("rejects negative limit", () => {
|
||||
writeAndStage(
|
||||
"memory/system/bad.md",
|
||||
"---\ndescription: valid\nlimit: -5\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("positive integer");
|
||||
});
|
||||
|
||||
test("rejects float limit", () => {
|
||||
writeAndStage(
|
||||
"memory/system/bad.md",
|
||||
"---\ndescription: valid\nlimit: 20.5\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("positive integer");
|
||||
});
|
||||
|
||||
test("allows limit with trailing whitespace", () => {
|
||||
test("allows legacy limit key for backward compatibility", () => {
|
||||
writeAndStage(
|
||||
"memory/system/ok.md",
|
||||
"---\ndescription: test\nlimit: 20000 \n---\n\nContent.\n",
|
||||
"---\ndescription: test\nlimit: legacy\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(true);
|
||||
@@ -200,7 +147,7 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
rmSync(hookPath);
|
||||
writeAndStage(
|
||||
"memory/system/skills.md",
|
||||
"---\ndescription: Skills\nlimit: 20000\nread_only: true\n---\n\nOriginal.\n",
|
||||
"---\ndescription: Skills\nread_only: true\n---\n\nOriginal.\n",
|
||||
);
|
||||
tryCommit();
|
||||
writeFileSync(hookPath, PRE_COMMIT_HOOK_SCRIPT, { mode: 0o755 });
|
||||
@@ -208,7 +155,7 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
// Second commit: try to modify it
|
||||
writeAndStage(
|
||||
"memory/system/skills.md",
|
||||
"---\ndescription: Skills\nlimit: 20000\nread_only: true\n---\n\nModified.\n",
|
||||
"---\ndescription: Skills\nread_only: true\n---\n\nModified.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
@@ -218,7 +165,7 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
test("rejects agent adding read_only to new file", () => {
|
||||
writeAndStage(
|
||||
"memory/system/new.md",
|
||||
"---\ndescription: New block\nlimit: 20000\nread_only: false\n---\n\nContent.\n",
|
||||
"---\ndescription: New block\nread_only: false\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
@@ -232,7 +179,7 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
rmSync(hookPath);
|
||||
writeAndStage(
|
||||
"memory/system/block.md",
|
||||
"---\ndescription: A block\nlimit: 20000\nread_only: false\n---\n\nContent.\n",
|
||||
"---\ndescription: A block\nread_only: false\n---\n\nContent.\n",
|
||||
);
|
||||
tryCommit();
|
||||
// Re-install hook
|
||||
@@ -241,7 +188,7 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
// Now try to change read_only
|
||||
writeAndStage(
|
||||
"memory/system/block.md",
|
||||
"---\ndescription: A block\nlimit: 20000\nread_only: true\n---\n\nContent.\n",
|
||||
"---\ndescription: A block\nread_only: true\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
@@ -254,7 +201,7 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
rmSync(hookPath);
|
||||
writeAndStage(
|
||||
"memory/system/block.md",
|
||||
"---\ndescription: A block\nlimit: 20000\nread_only: false\n---\n\nOriginal.\n",
|
||||
"---\ndescription: A block\nread_only: false\n---\n\nOriginal.\n",
|
||||
);
|
||||
tryCommit();
|
||||
writeFileSync(hookPath, PRE_COMMIT_HOOK_SCRIPT, { mode: 0o755 });
|
||||
@@ -262,7 +209,7 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
// Modify content but keep read_only the same
|
||||
writeAndStage(
|
||||
"memory/system/block.md",
|
||||
"---\ndescription: A block\nlimit: 20000\nread_only: false\n---\n\nUpdated.\n",
|
||||
"---\ndescription: A block\nread_only: false\n---\n\nUpdated.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(true);
|
||||
@@ -274,7 +221,7 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
rmSync(hookPath);
|
||||
writeAndStage(
|
||||
"memory/system/block.md",
|
||||
"---\ndescription: A block\nlimit: 20000\nread_only: false\n---\n\nContent.\n",
|
||||
"---\ndescription: A block\nread_only: false\n---\n\nContent.\n",
|
||||
);
|
||||
tryCommit();
|
||||
writeFileSync(hookPath, PRE_COMMIT_HOOK_SCRIPT, { mode: 0o755 });
|
||||
@@ -282,7 +229,7 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
// Remove read_only from frontmatter
|
||||
writeAndStage(
|
||||
"memory/system/block.md",
|
||||
"---\ndescription: A block\nlimit: 20000\n---\n\nContent.\n",
|
||||
"---\ndescription: A block\n---\n\nContent.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
@@ -290,6 +237,65 @@ describe("pre-commit hook: read_only protection", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("pre-commit hook: skill path guard", () => {
|
||||
test("rejects legacy flat skill file in nested memory layout", () => {
|
||||
writeAndStage(
|
||||
"memory/skills/slack-search.md",
|
||||
`${VALID_FM}Legacy flat skill file.\n`,
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("Use skills/<name>/SKILL.md");
|
||||
});
|
||||
|
||||
test("rejects legacy flat skill file in top-level layout", () => {
|
||||
writeAndStage("skills/slack-search.md", "Legacy flat skill file.\n");
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("Use skills/<name>/SKILL.md");
|
||||
});
|
||||
|
||||
test("allows canonical directory-based skill path", () => {
|
||||
writeAndStage("skills/slack-search/SKILL.md", "# Slack Search\n");
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("pre-commit hook: top-level layout (no memory/ prefix)", () => {
|
||||
test("validates frontmatter for system files without memory/ prefix", () => {
|
||||
writeAndStage(
|
||||
"system/human.md",
|
||||
"Just plain content\nno frontmatter here\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("missing frontmatter");
|
||||
});
|
||||
|
||||
test("allows valid frontmatter in system files without memory/ prefix", () => {
|
||||
writeAndStage("system/human.md", `${VALID_FM}Block content here.\n`);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test("validates frontmatter for reference files without memory/ prefix", () => {
|
||||
writeAndStage("reference/notes.md", "No frontmatter\n");
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.output).toContain("missing frontmatter");
|
||||
});
|
||||
|
||||
test("skips SKILL.md files inside skill directories", () => {
|
||||
writeAndStage(
|
||||
"skills/my-skill/SKILL.md",
|
||||
"# My Skill\nNo frontmatter needed.\n",
|
||||
);
|
||||
const result = tryCommit();
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("pre-commit hook: non-memory files", () => {
|
||||
test("ignores non-memory files", () => {
|
||||
writeAndStage("README.md", "---\nbogus: true\n---\n\nThis is fine.\n");
|
||||
|
||||
@@ -33,7 +33,8 @@ describe("queue ordering wiring", () => {
|
||||
// Queue is now drained via QueueRuntime.consumeItems; setQueueDisplay is
|
||||
// updated automatically via the onDequeued callback — no direct setState here.
|
||||
expect(segment).toContain("tuiQueueRef.current?.consumeItems(queueLen)");
|
||||
expect(segment).toContain("onSubmitRef.current(concatenatedMessage);");
|
||||
expect(segment).toContain("onSubmitRef.current(concatenatedMessage)");
|
||||
expect(segment).toContain("!dequeueInFlightRef.current");
|
||||
expect(segment).toContain("queuedOverlayAction,");
|
||||
});
|
||||
|
||||
|
||||
@@ -11,6 +11,23 @@ import {
|
||||
finalizeAutoReflectionPayload,
|
||||
getReflectionTranscriptPaths,
|
||||
} from "../../cli/helpers/reflectionTranscript";
|
||||
import { DIRECTORY_LIMIT_ENV } from "../../utils/directoryLimits";
|
||||
|
||||
const DIRECTORY_LIMIT_ENV_KEYS = Object.values(DIRECTORY_LIMIT_ENV);
|
||||
const ORIGINAL_DIRECTORY_ENV = Object.fromEntries(
|
||||
DIRECTORY_LIMIT_ENV_KEYS.map((key) => [key, process.env[key]]),
|
||||
) as Record<string, string | undefined>;
|
||||
|
||||
function restoreDirectoryLimitEnv(): void {
|
||||
for (const key of DIRECTORY_LIMIT_ENV_KEYS) {
|
||||
const original = ORIGINAL_DIRECTORY_ENV[key];
|
||||
if (original === undefined) {
|
||||
delete process.env[key];
|
||||
} else {
|
||||
process.env[key] = original;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
describe("reflectionTranscript helper", () => {
|
||||
const agentId = "agent-test";
|
||||
@@ -23,6 +40,7 @@ describe("reflectionTranscript helper", () => {
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
restoreDirectoryLimitEnv();
|
||||
delete process.env.LETTA_TRANSCRIPT_ROOT;
|
||||
await rm(testRoot, { recursive: true, force: true });
|
||||
});
|
||||
@@ -182,6 +200,35 @@ describe("reflectionTranscript helper", () => {
|
||||
expect(snapshot).toContain("</parent_memory>");
|
||||
});
|
||||
|
||||
test("buildParentMemorySnapshot collapses large users directory with omission marker", async () => {
|
||||
process.env[DIRECTORY_LIMIT_ENV.memfsTreeMaxChildrenPerDir] = "3";
|
||||
|
||||
const memoryDir = join(testRoot, "memory-large-users");
|
||||
await mkdir(join(memoryDir, "system"), { recursive: true });
|
||||
await mkdir(join(memoryDir, "users"), { recursive: true });
|
||||
|
||||
await writeFile(
|
||||
join(memoryDir, "system", "human.md"),
|
||||
"---\ndescription: User context\n---\nSystem content\n",
|
||||
"utf-8",
|
||||
);
|
||||
|
||||
for (let idx = 0; idx < 10; idx += 1) {
|
||||
const suffix = String(idx).padStart(2, "0");
|
||||
await writeFile(
|
||||
join(memoryDir, "users", `user_${suffix}.md`),
|
||||
`---\ndescription: User block ${suffix}\n---\ncontent ${suffix}\n`,
|
||||
"utf-8",
|
||||
);
|
||||
}
|
||||
|
||||
const snapshot = await buildParentMemorySnapshot(memoryDir);
|
||||
|
||||
expect(snapshot).toContain("users/");
|
||||
expect(snapshot).toContain("… (7 more entries)");
|
||||
expect(snapshot).not.toContain("user_09.md");
|
||||
});
|
||||
|
||||
test("buildReflectionSubagentPrompt uses expanded reflection instructions", () => {
|
||||
const prompt = buildReflectionSubagentPrompt({
|
||||
transcriptPath: "/tmp/transcript.txt",
|
||||
|
||||
@@ -5,6 +5,12 @@ import {
|
||||
isShellOutputTool,
|
||||
} from "../../cli/helpers/toolNameMapping";
|
||||
|
||||
describe("toolNameMapping display mappings", () => {
|
||||
test("maps memory_apply_patch to a friendly label", () => {
|
||||
expect(getDisplayToolName("memory_apply_patch")).toBe("Memory Patch");
|
||||
});
|
||||
});
|
||||
|
||||
describe("toolNameMapping.isMemoryTool", () => {
|
||||
test("recognizes all supported memory tool names", () => {
|
||||
expect(isMemoryTool("memory")).toBe(true);
|
||||
|
||||
@@ -217,6 +217,25 @@ describe("computeDiffPreviews", () => {
|
||||
expect(previews.map((p) => p.fileName).sort()).toEqual(["a.txt", "b.txt"]);
|
||||
});
|
||||
|
||||
it("returns one preview per file for memory_apply_patch", async () => {
|
||||
const patch = [
|
||||
"*** Begin Patch",
|
||||
"*** Update File: system/a.md",
|
||||
"@@ -1 +1 @@",
|
||||
"-old",
|
||||
"+new",
|
||||
"*** Add File: reference/b.md",
|
||||
"+hello",
|
||||
"*** End Patch",
|
||||
].join("\n");
|
||||
|
||||
const previews = await computeDiffPreviews("memory_apply_patch", {
|
||||
input: patch,
|
||||
});
|
||||
expect(previews).toHaveLength(2);
|
||||
expect(previews.map((p) => p.fileName).sort()).toEqual(["a.md", "b.md"]);
|
||||
});
|
||||
|
||||
it("resolves relative file paths against the provided working directory", async () => {
|
||||
const tempRoot = await mkdtemp(
|
||||
path.join(os.tmpdir(), "letta-diff-preview-"),
|
||||
|
||||
@@ -233,6 +233,26 @@ test("plan mode - allows Read", () => {
|
||||
expect(result.matchedRule).toBe("plan mode");
|
||||
});
|
||||
|
||||
test("plan mode - allows ViewImage", () => {
|
||||
permissionMode.setMode("plan");
|
||||
|
||||
const permissions: PermissionRules = {
|
||||
allow: [],
|
||||
deny: [],
|
||||
ask: [],
|
||||
};
|
||||
|
||||
const result = checkPermission(
|
||||
"ViewImage",
|
||||
{ path: "/Users/test/image.png" },
|
||||
permissions,
|
||||
"/Users/test/project",
|
||||
);
|
||||
|
||||
expect(result.decision).toBe("allow");
|
||||
expect(result.matchedRule).toBe("plan mode");
|
||||
});
|
||||
|
||||
test("plan mode - allows TaskOutput", () => {
|
||||
permissionMode.setMode("plan");
|
||||
|
||||
|
||||
30
src/tests/tools/clip-tool-return.test.ts
Normal file
30
src/tests/tools/clip-tool-return.test.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import { describe, expect, test } from "bun:test";
|
||||
import { clipToolReturn } from "../../tools/manager";
|
||||
|
||||
describe("clipToolReturn", () => {
|
||||
test("clips long single-line output and appends ellipsis", () => {
|
||||
const long = "A".repeat(1200);
|
||||
const clipped = clipToolReturn(long);
|
||||
|
||||
expect(clipped.length).toBeLessThan(400);
|
||||
expect(clipped.endsWith("…")).toBe(true);
|
||||
});
|
||||
|
||||
test("clips by line count for multiline output", () => {
|
||||
const text = "line1\nline2\nline3\nline4\nline5";
|
||||
const clipped = clipToolReturn(text, 3, 10_000);
|
||||
|
||||
expect(clipped).toContain("line1");
|
||||
expect(clipped).toContain("line2");
|
||||
expect(clipped).toContain("line3");
|
||||
expect(clipped).not.toContain("line4");
|
||||
expect(clipped.endsWith("…")).toBe(true);
|
||||
});
|
||||
|
||||
test("does not clip user-denial reasons", () => {
|
||||
const denial = `Error: request to call tool denied. User reason: ${"B".repeat(800)}`;
|
||||
const clipped = clipToolReturn(denial);
|
||||
|
||||
expect(clipped).toBe(denial);
|
||||
});
|
||||
});
|
||||
259
src/tests/tools/memory-apply-patch.test.ts
Normal file
259
src/tests/tools/memory-apply-patch.test.ts
Normal file
@@ -0,0 +1,259 @@
|
||||
import { afterEach, beforeEach, describe, expect, mock, test } from "bun:test";
|
||||
import { execFile as execFileCb } from "node:child_process";
|
||||
import { mkdtempSync, writeFileSync } from "node:fs";
|
||||
import { rm } from "node:fs/promises";
|
||||
import { tmpdir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
import { promisify } from "node:util";
|
||||
|
||||
const execFile = promisify(execFileCb);
|
||||
|
||||
const TEST_AGENT_ID = "agent-test-memory-apply-patch";
|
||||
const TEST_AGENT_NAME = "Bob";
|
||||
|
||||
mock.module("../../agent/context", () => ({
|
||||
getCurrentAgentId: () => TEST_AGENT_ID,
|
||||
}));
|
||||
|
||||
mock.module("../../agent/client", () => ({
|
||||
getClient: mock(() =>
|
||||
Promise.resolve({
|
||||
agents: {
|
||||
retrieve: mock(() => Promise.resolve({ name: TEST_AGENT_NAME })),
|
||||
},
|
||||
}),
|
||||
),
|
||||
getServerUrl: () => "http://localhost:8283",
|
||||
}));
|
||||
|
||||
const { memory_apply_patch } = await import(
|
||||
"../../tools/impl/MemoryApplyPatch"
|
||||
);
|
||||
|
||||
async function runGit(cwd: string, args: string[]): Promise<string> {
|
||||
const { stdout } = await execFile("git", args, { cwd });
|
||||
return String(stdout ?? "").trim();
|
||||
}
|
||||
|
||||
describe("memory_apply_patch tool", () => {
|
||||
let tempRoot: string;
|
||||
let memoryDir: string;
|
||||
let remoteDir: string;
|
||||
|
||||
const originalMemoryDir = process.env.MEMORY_DIR;
|
||||
const originalAgentId = process.env.AGENT_ID;
|
||||
const originalAgentName = process.env.AGENT_NAME;
|
||||
|
||||
beforeEach(async () => {
|
||||
tempRoot = mkdtempSync(join(tmpdir(), "letta-memory-apply-patch-"));
|
||||
memoryDir = join(tempRoot, "memory");
|
||||
remoteDir = join(tempRoot, "remote.git");
|
||||
|
||||
await execFile("git", ["init", "--bare", remoteDir]);
|
||||
await execFile("git", ["init", "-b", "main", memoryDir]);
|
||||
await runGit(memoryDir, ["config", "user.name", "setup"]);
|
||||
await runGit(memoryDir, ["config", "user.email", "setup@example.com"]);
|
||||
await runGit(memoryDir, ["remote", "add", "origin", remoteDir]);
|
||||
|
||||
writeFileSync(join(memoryDir, ".gitkeep"), "", "utf8");
|
||||
await runGit(memoryDir, ["add", ".gitkeep"]);
|
||||
await runGit(memoryDir, ["commit", "-m", "initial"]);
|
||||
await runGit(memoryDir, ["push", "-u", "origin", "main"]);
|
||||
|
||||
process.env.MEMORY_DIR = memoryDir;
|
||||
process.env.AGENT_ID = TEST_AGENT_ID;
|
||||
process.env.AGENT_NAME = TEST_AGENT_NAME;
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (originalMemoryDir === undefined) delete process.env.MEMORY_DIR;
|
||||
else process.env.MEMORY_DIR = originalMemoryDir;
|
||||
|
||||
if (originalAgentId === undefined) delete process.env.AGENT_ID;
|
||||
else process.env.AGENT_ID = originalAgentId;
|
||||
|
||||
if (originalAgentName === undefined) delete process.env.AGENT_NAME;
|
||||
else process.env.AGENT_NAME = originalAgentName;
|
||||
|
||||
if (tempRoot) {
|
||||
await rm(tempRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("requires reason and input", async () => {
|
||||
await expect(
|
||||
memory_apply_patch({
|
||||
input: "*** Begin Patch\n*** End Patch",
|
||||
} as Parameters<typeof memory_apply_patch>[0]),
|
||||
).rejects.toThrow(/missing required parameter/i);
|
||||
});
|
||||
|
||||
test("adds and updates memory files with commit reason and agent author", async () => {
|
||||
const seedPatch = [
|
||||
"*** Begin Patch",
|
||||
"*** Add File: system/contacts.md",
|
||||
"+---",
|
||||
"+description: Contacts",
|
||||
"+---",
|
||||
"+Sarah: cofounder",
|
||||
"*** End Patch",
|
||||
].join("\n");
|
||||
|
||||
await memory_apply_patch({
|
||||
reason: "Create contacts memory via patch",
|
||||
input: seedPatch,
|
||||
});
|
||||
|
||||
const updatePatch = [
|
||||
"*** Begin Patch",
|
||||
"*** Update File: system/contacts.md",
|
||||
"@@",
|
||||
"-Sarah: cofounder",
|
||||
"+Sarah: Letta cofounder",
|
||||
"*** End Patch",
|
||||
].join("\n");
|
||||
|
||||
await memory_apply_patch({
|
||||
reason: "Refine contacts memory via patch",
|
||||
input: updatePatch,
|
||||
});
|
||||
|
||||
const content = await runGit(memoryDir, [
|
||||
"show",
|
||||
"HEAD:system/contacts.md",
|
||||
]);
|
||||
expect(content).toContain("Sarah: Letta cofounder");
|
||||
|
||||
const logOutput = await runGit(memoryDir, [
|
||||
"log",
|
||||
"-1",
|
||||
"--pretty=format:%s%n%an%n%ae",
|
||||
]);
|
||||
const [subject, authorName, authorEmail] = logOutput.split("\n");
|
||||
expect(subject).toBe("Refine contacts memory via patch");
|
||||
expect(authorName).toBe(TEST_AGENT_NAME);
|
||||
expect(authorEmail).toBe(`${TEST_AGENT_ID}@letta.com`);
|
||||
});
|
||||
|
||||
test("rejects absolute paths outside MEMORY_DIR", async () => {
|
||||
const patch = [
|
||||
"*** Begin Patch",
|
||||
"*** Add File: /tmp/outside.md",
|
||||
"+hello",
|
||||
"*** End Patch",
|
||||
].join("\n");
|
||||
|
||||
await expect(
|
||||
memory_apply_patch({
|
||||
reason: "should fail",
|
||||
input: patch,
|
||||
}),
|
||||
).rejects.toThrow(/only be used to modify files/i);
|
||||
});
|
||||
|
||||
test("rejects editing read_only memory files", async () => {
|
||||
await memory_apply_patch({
|
||||
reason: "seed read only",
|
||||
input: [
|
||||
"*** Begin Patch",
|
||||
"*** Add File: system/ro.md",
|
||||
"+---",
|
||||
"+description: Read only",
|
||||
"+read_only: true",
|
||||
"+---",
|
||||
"+keep",
|
||||
"*** End Patch",
|
||||
].join("\n"),
|
||||
});
|
||||
|
||||
await expect(
|
||||
memory_apply_patch({
|
||||
reason: "attempt edit ro",
|
||||
input: [
|
||||
"*** Begin Patch",
|
||||
"*** Update File: system/ro.md",
|
||||
"@@",
|
||||
"-keep",
|
||||
"+change",
|
||||
"*** End Patch",
|
||||
].join("\n"),
|
||||
}),
|
||||
).rejects.toThrow(/read_only/i);
|
||||
});
|
||||
|
||||
test("returns error when push fails but keeps local commit", async () => {
|
||||
await memory_apply_patch({
|
||||
reason: "seed notes",
|
||||
input: [
|
||||
"*** Begin Patch",
|
||||
"*** Add File: reference/history/notes.md",
|
||||
"+old",
|
||||
"*** End Patch",
|
||||
].join("\n"),
|
||||
});
|
||||
|
||||
await runGit(memoryDir, [
|
||||
"remote",
|
||||
"set-url",
|
||||
"origin",
|
||||
join(tempRoot, "missing-remote.git"),
|
||||
]);
|
||||
|
||||
const reason = "Update notes with failing push";
|
||||
await expect(
|
||||
memory_apply_patch({
|
||||
reason,
|
||||
input: [
|
||||
"*** Begin Patch",
|
||||
"*** Update File: reference/history/notes.md",
|
||||
"@@",
|
||||
"-old",
|
||||
"+new",
|
||||
"*** End Patch",
|
||||
].join("\n"),
|
||||
}),
|
||||
).rejects.toThrow(/committed .* but push failed/i);
|
||||
|
||||
const subject = await runGit(memoryDir, [
|
||||
"log",
|
||||
"-1",
|
||||
"--pretty=format:%s",
|
||||
]);
|
||||
expect(subject).toBe(reason);
|
||||
});
|
||||
|
||||
test("updates files that omit frontmatter limit", async () => {
|
||||
await memory_apply_patch({
|
||||
reason: "seed no-limit memory",
|
||||
input: [
|
||||
"*** Begin Patch",
|
||||
"*** Add File: system/no-limit.md",
|
||||
"+---",
|
||||
"+description: No limit",
|
||||
"+---",
|
||||
"+before",
|
||||
"*** End Patch",
|
||||
].join("\n"),
|
||||
});
|
||||
|
||||
await memory_apply_patch({
|
||||
reason: "update no-limit memory",
|
||||
input: [
|
||||
"*** Begin Patch",
|
||||
"*** Update File: system/no-limit.md",
|
||||
"@@",
|
||||
"-before",
|
||||
"+after",
|
||||
"*** End Patch",
|
||||
].join("\n"),
|
||||
});
|
||||
|
||||
const content = await runGit(memoryDir, [
|
||||
"show",
|
||||
"HEAD:system/no-limit.md",
|
||||
]);
|
||||
expect(content).toContain("description: No limit");
|
||||
expect(content).not.toContain("limit:");
|
||||
expect(content).toContain("after");
|
||||
});
|
||||
});
|
||||
@@ -57,6 +57,14 @@ describe("isConversationBusyError", () => {
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
test("detects busy error with run_id (run_id breaks old substring match)", () => {
|
||||
expect(
|
||||
isConversationBusyError(
|
||||
"Cannot send a new message: Another request (run_id=run-abc-123) is currently being processed for this conversation. Please wait for it to complete.",
|
||||
),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
test("rejects approval-pending", () => {
|
||||
expect(isConversationBusyError("The agent is waiting for approval")).toBe(
|
||||
false,
|
||||
|
||||
@@ -6,6 +6,9 @@ import type {
|
||||
MessageQueueItem,
|
||||
TaskNotificationQueueItem,
|
||||
} from "../../queue/queueRuntime";
|
||||
import { queueSkillContent } from "../../tools/impl/skillContentRegistry";
|
||||
import { resolveRecoveredApprovalResponse } from "../../websocket/listener/recovery";
|
||||
import { injectQueuedSkillContent } from "../../websocket/listener/skill-injection";
|
||||
import type { IncomingMessage } from "../../websocket/listener/types";
|
||||
|
||||
type MockStream = {
|
||||
@@ -197,6 +200,8 @@ function makeIncomingMessage(
|
||||
|
||||
describe("listen-client multi-worker concurrency", () => {
|
||||
beforeEach(() => {
|
||||
queueSkillContent("__test-cleanup__", "__test-cleanup__");
|
||||
injectQueuedSkillContent([]);
|
||||
permissionMode.reset();
|
||||
sendMessageStreamMock.mockClear();
|
||||
getStreamToolContextIdMock.mockClear();
|
||||
@@ -753,6 +758,11 @@ describe("listen-client multi-worker concurrency", () => {
|
||||
throw new Error("Expected stale recovery queued task item");
|
||||
}
|
||||
|
||||
queueSkillContent(
|
||||
"tool-call-1",
|
||||
"<searching-messages>stale recovery skill content</searching-messages>",
|
||||
);
|
||||
|
||||
const recoveryPromise = __listenClientTestUtils.resolveStaleApprovals(
|
||||
runtime,
|
||||
socket as unknown as WebSocket,
|
||||
@@ -766,11 +776,14 @@ describe("listen-client multi-worker concurrency", () => {
|
||||
const continuationMessages = sendMessageStreamMock.mock.calls[0]?.[1] as
|
||||
| Array<Record<string, unknown>>
|
||||
| undefined;
|
||||
expect(continuationMessages).toHaveLength(2);
|
||||
expect(continuationMessages?.[0]).toEqual({
|
||||
type: "approval",
|
||||
approvals: [approvalResult],
|
||||
});
|
||||
expect(continuationMessages).toHaveLength(3);
|
||||
expect(continuationMessages?.[0]).toEqual(
|
||||
expect.objectContaining({
|
||||
type: "approval",
|
||||
approvals: [approvalResult],
|
||||
otid: expect.any(String),
|
||||
}),
|
||||
);
|
||||
expect(continuationMessages?.[1]).toEqual({
|
||||
role: "user",
|
||||
content: [
|
||||
@@ -782,6 +795,16 @@ describe("listen-client multi-worker concurrency", () => {
|
||||
},
|
||||
],
|
||||
});
|
||||
expect(continuationMessages?.[2]).toEqual({
|
||||
role: "user",
|
||||
content: [
|
||||
{
|
||||
type: "text",
|
||||
text: "<searching-messages>stale recovery skill content</searching-messages>",
|
||||
},
|
||||
],
|
||||
otid: expect.any(String),
|
||||
});
|
||||
expect(runtime.loopStatus as string).toBe("PROCESSING_API_RESPONSE");
|
||||
expect(runtime.queueRuntime.length).toBe(0);
|
||||
expect(runtime.queuedMessagesByItemId.size).toBe(0);
|
||||
@@ -806,6 +829,156 @@ describe("listen-client multi-worker concurrency", () => {
|
||||
});
|
||||
});
|
||||
|
||||
test("interrupt-queue approval continuation appends skill content as trailing user message", async () => {
|
||||
const listener = __listenClientTestUtils.createListenerRuntime();
|
||||
__listenClientTestUtils.setActiveRuntime(listener);
|
||||
const runtime = __listenClientTestUtils.getOrCreateScopedRuntime(
|
||||
listener,
|
||||
"agent-1",
|
||||
"conv-int",
|
||||
);
|
||||
const socket = new MockSocket();
|
||||
|
||||
runtime.pendingInterruptedResults = [
|
||||
{
|
||||
type: "approval",
|
||||
tool_call_id: "call-int",
|
||||
approve: false,
|
||||
reason: "Interrupted by user",
|
||||
},
|
||||
] as never;
|
||||
runtime.pendingInterruptedContext = {
|
||||
agentId: "agent-1",
|
||||
conversationId: "conv-int",
|
||||
continuationEpoch: runtime.continuationEpoch,
|
||||
};
|
||||
runtime.pendingInterruptedToolCallIds = ["call-int"];
|
||||
|
||||
queueSkillContent(
|
||||
"call-int",
|
||||
"<searching-messages>interrupt path skill content</searching-messages>",
|
||||
);
|
||||
|
||||
await __listenClientTestUtils.handleIncomingMessage(
|
||||
{
|
||||
type: "message",
|
||||
agentId: "agent-1",
|
||||
conversationId: "conv-int",
|
||||
messages: [],
|
||||
} as unknown as IncomingMessage,
|
||||
socket as unknown as WebSocket,
|
||||
runtime,
|
||||
);
|
||||
|
||||
expect(sendMessageStreamMock.mock.calls.length).toBeGreaterThan(0);
|
||||
const firstSendMessages = sendMessageStreamMock.mock.calls[0]?.[1] as
|
||||
| Array<Record<string, unknown>>
|
||||
| undefined;
|
||||
|
||||
expect(firstSendMessages).toHaveLength(2);
|
||||
expect(firstSendMessages?.[0]).toMatchObject({
|
||||
type: "approval",
|
||||
approvals: [
|
||||
{
|
||||
tool_call_id: "call-int",
|
||||
approve: false,
|
||||
reason: "Interrupted by user",
|
||||
},
|
||||
],
|
||||
});
|
||||
expect(firstSendMessages?.[1]).toEqual({
|
||||
role: "user",
|
||||
content: [
|
||||
{
|
||||
type: "text",
|
||||
text: "<searching-messages>interrupt path skill content</searching-messages>",
|
||||
},
|
||||
],
|
||||
otid: expect.any(String),
|
||||
});
|
||||
});
|
||||
|
||||
test("recovered approval replay keeps approval-only routing and appends skill content at send boundary", async () => {
|
||||
const listener = __listenClientTestUtils.createListenerRuntime();
|
||||
__listenClientTestUtils.setActiveRuntime(listener);
|
||||
const runtime = __listenClientTestUtils.getOrCreateScopedRuntime(
|
||||
listener,
|
||||
"agent-1",
|
||||
"conv-recovered",
|
||||
);
|
||||
const socket = new MockSocket();
|
||||
|
||||
runtime.recoveredApprovalState = {
|
||||
agentId: "agent-1",
|
||||
conversationId: "conv-recovered",
|
||||
approvalsByRequestId: new Map([
|
||||
[
|
||||
"perm-recovered-1",
|
||||
{
|
||||
approval: {
|
||||
toolCallId: "tool-call-recovered-1",
|
||||
toolName: "Write",
|
||||
toolArgs: '{"file_path":"foo.ts"}',
|
||||
},
|
||||
controlRequest: {
|
||||
type: "control_request",
|
||||
request_id: "perm-recovered-1",
|
||||
request: {
|
||||
subtype: "can_use_tool",
|
||||
tool_name: "Write",
|
||||
input: { file_path: "foo.ts" },
|
||||
tool_call_id: "tool-call-recovered-1",
|
||||
permission_suggestions: [],
|
||||
blocked_path: null,
|
||||
},
|
||||
agent_id: "agent-1",
|
||||
conversation_id: "conv-recovered",
|
||||
},
|
||||
},
|
||||
],
|
||||
]),
|
||||
pendingRequestIds: new Set(["perm-recovered-1"]),
|
||||
responsesByRequestId: new Map(),
|
||||
};
|
||||
|
||||
queueSkillContent(
|
||||
"tool-call-recovered-1",
|
||||
"<searching-messages>recovered skill content</searching-messages>",
|
||||
);
|
||||
|
||||
await resolveRecoveredApprovalResponse(
|
||||
runtime,
|
||||
socket as unknown as WebSocket,
|
||||
{
|
||||
request_id: "perm-recovered-1",
|
||||
decision: { behavior: "allow" },
|
||||
},
|
||||
__listenClientTestUtils.handleIncomingMessage,
|
||||
{},
|
||||
);
|
||||
|
||||
expect(sendMessageStreamMock.mock.calls.length).toBeGreaterThan(0);
|
||||
const firstSendMessages = sendMessageStreamMock.mock.calls[0]?.[1] as
|
||||
| Array<Record<string, unknown>>
|
||||
| undefined;
|
||||
|
||||
expect(firstSendMessages).toHaveLength(2);
|
||||
expect(firstSendMessages?.[0]).toMatchObject({
|
||||
type: "approval",
|
||||
approvals: [],
|
||||
});
|
||||
expect(firstSendMessages?.[1]).toEqual({
|
||||
role: "user",
|
||||
content: [
|
||||
{
|
||||
type: "text",
|
||||
text: "<searching-messages>recovered skill content</searching-messages>",
|
||||
},
|
||||
],
|
||||
otid: expect.any(String),
|
||||
});
|
||||
});
|
||||
|
||||
test("queue pump status callbacks stay aggregate when another conversation is busy", async () => {
|
||||
const listener = __listenClientTestUtils.createListenerRuntime();
|
||||
__listenClientTestUtils.setActiveRuntime(listener);
|
||||
@@ -951,4 +1124,122 @@ describe("listen-client multi-worker concurrency", () => {
|
||||
expect(runtime.loopStatus).toBe("WAITING_ON_INPUT");
|
||||
expect(runtime.queuedMessagesByItemId.size).toBe(0);
|
||||
});
|
||||
|
||||
test("mid-turn mode changes apply to same-turn approval classification", async () => {
|
||||
const listener = __listenClientTestUtils.createListenerRuntime();
|
||||
__listenClientTestUtils.setActiveRuntime(listener);
|
||||
const runtime = __listenClientTestUtils.getOrCreateScopedRuntime(
|
||||
listener,
|
||||
"agent-1",
|
||||
"conv-mid",
|
||||
);
|
||||
const socket = new MockSocket();
|
||||
|
||||
let releaseFirstDrain!: () => void;
|
||||
const firstDrainGate = new Promise<void>((resolve) => {
|
||||
releaseFirstDrain = resolve;
|
||||
});
|
||||
let drainCount = 0;
|
||||
drainHandlers.set("conv-mid", async () => {
|
||||
drainCount += 1;
|
||||
if (drainCount === 1) {
|
||||
await firstDrainGate;
|
||||
return {
|
||||
stopReason: "requires_approval",
|
||||
approvals: [
|
||||
{
|
||||
toolCallId: "tc-1",
|
||||
toolName: "Bash",
|
||||
toolArgs: '{"command":"pwd"}',
|
||||
},
|
||||
],
|
||||
apiDurationMs: 0,
|
||||
};
|
||||
}
|
||||
return {
|
||||
stopReason: "end_turn",
|
||||
approvals: [],
|
||||
apiDurationMs: 0,
|
||||
};
|
||||
});
|
||||
|
||||
let capturedModeAtClassification: string | null = null;
|
||||
(classifyApprovalsMock as any).mockImplementationOnce(
|
||||
async (_approvals: any, opts: any) => {
|
||||
capturedModeAtClassification = opts?.permissionModeState?.mode ?? null;
|
||||
return {
|
||||
autoAllowed: [
|
||||
{
|
||||
approval: {
|
||||
toolCallId: "tc-1",
|
||||
toolName: "Bash",
|
||||
toolArgs: '{"command":"pwd"}',
|
||||
},
|
||||
permission: { decision: "allow" },
|
||||
context: null,
|
||||
parsedArgs: { command: "pwd" },
|
||||
},
|
||||
],
|
||||
autoDenied: [],
|
||||
needsUserInput: [],
|
||||
};
|
||||
},
|
||||
);
|
||||
(executeApprovalBatchMock as any).mockResolvedValueOnce([
|
||||
{
|
||||
type: "tool",
|
||||
tool_call_id: "tc-1",
|
||||
status: "success",
|
||||
tool_return: "ok",
|
||||
},
|
||||
]);
|
||||
|
||||
const turnPromise = __listenClientTestUtils.handleIncomingMessage(
|
||||
makeIncomingMessage("agent-1", "conv-mid", "run it"),
|
||||
socket as unknown as WebSocket,
|
||||
runtime,
|
||||
);
|
||||
|
||||
await waitFor(() => sendMessageStreamMock.mock.calls.length >= 1);
|
||||
|
||||
await __listenClientTestUtils.handleChangeDeviceStateInput(listener, {
|
||||
command: {
|
||||
type: "change_device_state",
|
||||
runtime: { agent_id: "agent-1", conversation_id: "conv-mid" },
|
||||
payload: { mode: "bypassPermissions" },
|
||||
},
|
||||
socket: socket as unknown as WebSocket,
|
||||
opts: {},
|
||||
processQueuedTurn: async () => {},
|
||||
});
|
||||
|
||||
releaseFirstDrain();
|
||||
|
||||
await turnPromise;
|
||||
|
||||
expect(capturedModeAtClassification === "bypassPermissions").toBe(true);
|
||||
});
|
||||
|
||||
test("change_device_state does not prune default-state entry mid-turn", async () => {
|
||||
const listener = __listenClientTestUtils.createListenerRuntime();
|
||||
__listenClientTestUtils.setActiveRuntime(listener);
|
||||
const socket = new MockSocket();
|
||||
|
||||
await __listenClientTestUtils.handleChangeDeviceStateInput(listener, {
|
||||
command: {
|
||||
type: "change_device_state",
|
||||
runtime: { agent_id: "agent-1", conversation_id: "default" },
|
||||
payload: { mode: "default" },
|
||||
},
|
||||
socket: socket as unknown as WebSocket,
|
||||
opts: {},
|
||||
processQueuedTurn: async () => {},
|
||||
});
|
||||
|
||||
expect(
|
||||
listener.permissionModeByConversation.has(
|
||||
"agent:agent-1::conversation:default",
|
||||
),
|
||||
).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
93
src/tests/websocket/listener-permission-mode.test.ts
Normal file
93
src/tests/websocket/listener-permission-mode.test.ts
Normal file
@@ -0,0 +1,93 @@
|
||||
import { describe, expect, test } from "bun:test";
|
||||
import { __listenClientTestUtils } from "../../websocket/listen-client";
|
||||
import {
|
||||
getConversationPermissionModeState,
|
||||
getOrCreateConversationPermissionModeStateRef,
|
||||
getPermissionModeScopeKey,
|
||||
pruneConversationPermissionModeStateIfDefault,
|
||||
} from "../../websocket/listener/permissionMode";
|
||||
|
||||
describe("listener permission mode helpers", () => {
|
||||
test("getOrCreate ref preserves identity across legacy default-key migration", () => {
|
||||
const listener = __listenClientTestUtils.createListenerRuntime();
|
||||
const legacyKey = getPermissionModeScopeKey(null, "default");
|
||||
|
||||
const legacyState = {
|
||||
mode: "acceptEdits" as const,
|
||||
planFilePath: null,
|
||||
modeBeforePlan: null,
|
||||
};
|
||||
listener.permissionModeByConversation.set(legacyKey, legacyState);
|
||||
|
||||
const canonicalRef = getOrCreateConversationPermissionModeStateRef(
|
||||
listener,
|
||||
"agent-123",
|
||||
"default",
|
||||
);
|
||||
|
||||
expect(canonicalRef).toBe(legacyState);
|
||||
expect(listener.permissionModeByConversation.has(legacyKey)).toBe(false);
|
||||
expect(
|
||||
listener.permissionModeByConversation.get(
|
||||
getPermissionModeScopeKey("agent-123", "default"),
|
||||
),
|
||||
).toBe(legacyState);
|
||||
});
|
||||
|
||||
test("read getter returns default snapshot without materializing map entry", () => {
|
||||
const listener = __listenClientTestUtils.createListenerRuntime();
|
||||
const scopeKey = getPermissionModeScopeKey("agent-xyz", "conv-1");
|
||||
|
||||
const state = getConversationPermissionModeState(
|
||||
listener,
|
||||
"agent-xyz",
|
||||
"conv-1",
|
||||
);
|
||||
|
||||
expect(state.mode).toBeDefined();
|
||||
expect(listener.permissionModeByConversation.has(scopeKey)).toBe(false);
|
||||
});
|
||||
|
||||
test("prune removes only default-equivalent canonical entries", () => {
|
||||
const listener = __listenClientTestUtils.createListenerRuntime();
|
||||
const ref = getOrCreateConversationPermissionModeStateRef(
|
||||
listener,
|
||||
"agent-1",
|
||||
"conv-prune",
|
||||
);
|
||||
|
||||
const prunedDefault = pruneConversationPermissionModeStateIfDefault(
|
||||
listener,
|
||||
"agent-1",
|
||||
"conv-prune",
|
||||
);
|
||||
expect(prunedDefault).toBe(true);
|
||||
expect(
|
||||
listener.permissionModeByConversation.has(
|
||||
getPermissionModeScopeKey("agent-1", "conv-prune"),
|
||||
),
|
||||
).toBe(false);
|
||||
|
||||
const ref2 = getOrCreateConversationPermissionModeStateRef(
|
||||
listener,
|
||||
"agent-1",
|
||||
"conv-prune",
|
||||
);
|
||||
ref2.mode = "bypassPermissions";
|
||||
|
||||
const prunedNonDefault = pruneConversationPermissionModeStateIfDefault(
|
||||
listener,
|
||||
"agent-1",
|
||||
"conv-prune",
|
||||
);
|
||||
expect(prunedNonDefault).toBe(false);
|
||||
expect(
|
||||
listener.permissionModeByConversation.get(
|
||||
getPermissionModeScopeKey("agent-1", "conv-prune"),
|
||||
),
|
||||
).toBe(ref2);
|
||||
|
||||
// keep typechecker happy about intentionally unused ref
|
||||
expect(ref).toBeDefined();
|
||||
});
|
||||
});
|
||||
46
src/tools/descriptions/MemoryApplyPatch.md
Normal file
46
src/tools/descriptions/MemoryApplyPatch.md
Normal file
@@ -0,0 +1,46 @@
|
||||
Apply a codex-style patch to memory files in `$MEMORY_DIR`, then automatically commit and push the change.
|
||||
|
||||
This is similar to `apply_patch`, but scoped to the memory filesystem and with memory-aware guardrails.
|
||||
|
||||
- Required args:
|
||||
- `reason` — git commit message for the memory change
|
||||
- `input` — patch text using the standard apply_patch format
|
||||
|
||||
Patch format:
|
||||
- `*** Begin Patch`
|
||||
- `*** Add File: <path>`
|
||||
- `*** Update File: <path>`
|
||||
- optional `*** Move to: <path>`
|
||||
- one or more `@@` hunks with ` `, `-`, `+` lines
|
||||
- `*** Delete File: <path>`
|
||||
- `*** End Patch`
|
||||
|
||||
Path rules:
|
||||
- Relative paths are interpreted inside memory repo
|
||||
- Absolute paths are allowed only when under `$MEMORY_DIR`
|
||||
- Paths outside memory repo are rejected
|
||||
|
||||
Memory rules:
|
||||
- Operates on markdown memory files (`.md`)
|
||||
- Updated/deleted files must be valid memory files with frontmatter
|
||||
- `read_only: true` files cannot be modified
|
||||
- If adding a file without frontmatter, frontmatter is created automatically
|
||||
|
||||
Git behavior:
|
||||
- Stages changed memory paths
|
||||
- Commits with `reason`
|
||||
- Uses agent identity author (`<agent_id>@letta.com`)
|
||||
- Pushes to remote
|
||||
|
||||
Example:
|
||||
```python
|
||||
memory_apply_patch(
|
||||
reason="Refine coding preferences",
|
||||
input="""*** Begin Patch
|
||||
*** Update File: system/human/prefs/coding.md
|
||||
@@
|
||||
-Use broad abstractions
|
||||
+Prefer small focused helpers
|
||||
*** End Patch"""
|
||||
)
|
||||
```
|
||||
@@ -85,14 +85,11 @@ interface MemoryResult {
|
||||
interface ParsedMemoryFile {
|
||||
frontmatter: {
|
||||
description: string;
|
||||
limit: number;
|
||||
read_only?: string;
|
||||
};
|
||||
body: string;
|
||||
}
|
||||
|
||||
const DEFAULT_LIMIT = 2000;
|
||||
|
||||
export async function memory(args: MemoryArgs): Promise<MemoryResult> {
|
||||
validateRequiredParams(args, ["command", "reason"], "memory");
|
||||
|
||||
@@ -126,7 +123,6 @@ export async function memory(args: MemoryArgs): Promise<MemoryResult> {
|
||||
const rendered = renderMemoryFile(
|
||||
{
|
||||
description,
|
||||
limit: DEFAULT_LIMIT,
|
||||
},
|
||||
body,
|
||||
);
|
||||
@@ -273,6 +269,9 @@ export async function memory(args: MemoryArgs): Promise<MemoryResult> {
|
||||
};
|
||||
}
|
||||
|
||||
// Emit memory_updated push event so web UI auto-refreshes
|
||||
emitMemoryUpdated(affectedPaths);
|
||||
|
||||
return {
|
||||
message: `Memory ${command} applied and pushed (${commitResult.sha?.slice(0, 7) ?? "unknown"}).`,
|
||||
};
|
||||
@@ -451,7 +450,6 @@ function parseMemoryFile(content: string): ParsedMemoryFile {
|
||||
const body = match[2] ?? "";
|
||||
|
||||
let description: string | undefined;
|
||||
let limit: number | undefined;
|
||||
let readOnly: string | undefined;
|
||||
|
||||
for (const line of frontmatterText.split(/\r?\n/)) {
|
||||
@@ -462,11 +460,6 @@ function parseMemoryFile(content: string): ParsedMemoryFile {
|
||||
|
||||
if (key === "description") {
|
||||
description = value;
|
||||
} else if (key === "limit") {
|
||||
const parsedLimit = Number.parseInt(value, 10);
|
||||
if (!Number.isNaN(parsedLimit)) {
|
||||
limit = parsedLimit;
|
||||
}
|
||||
} else if (key === "read_only") {
|
||||
readOnly = value;
|
||||
}
|
||||
@@ -475,16 +468,9 @@ function parseMemoryFile(content: string): ParsedMemoryFile {
|
||||
if (!description || !description.trim()) {
|
||||
throw new Error("memory: target file frontmatter is missing 'description'");
|
||||
}
|
||||
if (!limit || !Number.isInteger(limit) || limit <= 0) {
|
||||
throw new Error(
|
||||
"memory: target file frontmatter is missing a valid positive 'limit'",
|
||||
);
|
||||
}
|
||||
|
||||
return {
|
||||
frontmatter: {
|
||||
description,
|
||||
limit,
|
||||
...(readOnly !== undefined ? { read_only: readOnly } : {}),
|
||||
},
|
||||
body,
|
||||
@@ -492,21 +478,16 @@ function parseMemoryFile(content: string): ParsedMemoryFile {
|
||||
}
|
||||
|
||||
function renderMemoryFile(
|
||||
frontmatter: { description: string; limit: number; read_only?: string },
|
||||
frontmatter: { description: string; read_only?: string },
|
||||
body: string,
|
||||
): string {
|
||||
const description = frontmatter.description.trim();
|
||||
if (!description) {
|
||||
throw new Error("memory: 'description' must not be empty");
|
||||
}
|
||||
if (!Number.isInteger(frontmatter.limit) || frontmatter.limit <= 0) {
|
||||
throw new Error("memory: 'limit' must be a positive integer");
|
||||
}
|
||||
|
||||
const lines = [
|
||||
"---",
|
||||
`description: ${sanitizeFrontmatterValue(description)}`,
|
||||
`limit: ${frontmatter.limit}`,
|
||||
];
|
||||
|
||||
if (frontmatter.read_only !== undefined) {
|
||||
@@ -621,3 +602,36 @@ function requireString(
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Emit a `memory_updated` push event over the WebSocket so the web UI
|
||||
* can auto-refresh its memory index without polling.
|
||||
*/
|
||||
function emitMemoryUpdated(affectedPaths: string[]): void {
|
||||
try {
|
||||
// Lazy-import to avoid circular deps — this file is loaded before WS infra
|
||||
// eslint-disable-next-line @typescript-eslint/no-require-imports
|
||||
const { getActiveRuntime } =
|
||||
require("../../websocket/listener/runtime") as {
|
||||
getActiveRuntime: () => {
|
||||
socket: { readyState: number; send: (data: string) => void } | null;
|
||||
} | null;
|
||||
};
|
||||
|
||||
const runtime = getActiveRuntime();
|
||||
const socket = runtime?.socket;
|
||||
if (!socket || socket.readyState !== 1 /* WebSocket.OPEN */) {
|
||||
return;
|
||||
}
|
||||
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "memory_updated",
|
||||
affected_paths: affectedPaths,
|
||||
timestamp: Date.now(),
|
||||
}),
|
||||
);
|
||||
} catch {
|
||||
// Best-effort — never break tool execution for a push event
|
||||
}
|
||||
}
|
||||
|
||||
871
src/tools/impl/MemoryApplyPatch.ts
Normal file
871
src/tools/impl/MemoryApplyPatch.ts
Normal file
@@ -0,0 +1,871 @@
|
||||
import { execFile as execFileCb } from "node:child_process";
|
||||
import { existsSync } from "node:fs";
|
||||
import {
|
||||
access,
|
||||
mkdir,
|
||||
readFile,
|
||||
rm,
|
||||
stat,
|
||||
unlink,
|
||||
writeFile,
|
||||
} from "node:fs/promises";
|
||||
import { homedir } from "node:os";
|
||||
import { dirname, isAbsolute, relative, resolve } from "node:path";
|
||||
import { promisify } from "node:util";
|
||||
import { getClient } from "../../agent/client";
|
||||
import { getCurrentAgentId } from "../../agent/context";
|
||||
import { validateRequiredParams } from "./validation";
|
||||
|
||||
const execFile = promisify(execFileCb);
|
||||
|
||||
// One parsed operation from the "*** Begin Patch" envelope, tagged by `kind`
// so the apply loop can branch on it. Labels are canonical memory labels
// (no "memory/" prefix, no ".md" suffix); *RelPath fields are the
// repo-relative "<label>.md" form used in error messages and git pathspecs.
type ParsedPatchOp =
  | {
      kind: "add";
      targetLabel: string;
      targetRelPath: string;
      // Raw content lines with the leading '+' already stripped.
      contentLines: string[];
    }
  | {
      kind: "update";
      sourceLabel: string;
      sourceRelPath: string;
      // Differs from source only when the patch contains "*** Move to:".
      targetLabel: string;
      targetRelPath: string;
      hunks: Hunk[];
    }
  | {
      kind: "delete";
      targetLabel: string;
      targetRelPath: string;
    };

// A single "@@" hunk: body lines still carrying their ' '/'+'/'-' prefixes.
interface Hunk {
  lines: string[];
}

// A memory markdown file split into its "---"-fenced frontmatter and body.
interface ParsedMemoryFile {
  frontmatter: {
    description: string;
    // Stored as the raw string value; "true" blocks modification.
    read_only?: string;
  };
  body: string;
}

// Tool arguments: `reason` becomes the git commit message, `input` is the
// full apply_patch-style patch text.
interface MemoryApplyPatchArgs {
  reason: string;
  input: string;
}

// Human-readable result summary returned to the caller.
interface MemoryApplyPatchResult {
  message: string;
}
|
||||
|
||||
async function getAgentIdentity(): Promise<{
|
||||
agentId: string;
|
||||
agentName: string;
|
||||
}> {
|
||||
const envAgentId = (
|
||||
process.env.AGENT_ID ||
|
||||
process.env.LETTA_AGENT_ID ||
|
||||
""
|
||||
).trim();
|
||||
const contextAgentId = (() => {
|
||||
try {
|
||||
return getCurrentAgentId().trim();
|
||||
} catch {
|
||||
return "";
|
||||
}
|
||||
})();
|
||||
const agentId = contextAgentId || envAgentId;
|
||||
|
||||
if (!agentId) {
|
||||
throw new Error(
|
||||
"memory_apply_patch: unable to resolve agent id for git author email",
|
||||
);
|
||||
}
|
||||
|
||||
let agentName = "";
|
||||
try {
|
||||
const client = await getClient();
|
||||
const agent = await client.agents.retrieve(agentId);
|
||||
agentName = (agent.name || "").trim();
|
||||
} catch {
|
||||
// best-effort fallback below
|
||||
}
|
||||
|
||||
if (!agentName) {
|
||||
agentName = (process.env.AGENT_NAME || "").trim() || agentId;
|
||||
}
|
||||
|
||||
return { agentId, agentName };
|
||||
}
|
||||
|
||||
/**
 * Applies an apply_patch-style patch to the git-backed memory directory,
 * then commits and pushes the result using `reason` as the commit message.
 *
 * All operations are staged in memory first (pendingWrites/pendingDeletes),
 * so a failing op aborts before anything touches disk. Read-only files
 * (frontmatter `read_only: true`) are rejected both as sources and targets.
 *
 * @param args.reason - Non-empty commit message for the change.
 * @param args.input  - Full patch text ("*** Begin Patch" ... "*** End Patch").
 * @returns A human-readable summary message.
 * @throws Error on invalid arguments, malformed patches, missing/read-only
 *         files, or git failures (commit succeeded but push failed included).
 */
export async function memory_apply_patch(
  args: MemoryApplyPatchArgs,
): Promise<MemoryApplyPatchResult> {
  validateRequiredParams(args, ["reason", "input"], "memory_apply_patch");

  const reason = args.reason.trim();
  if (!reason) {
    throw new Error("memory_apply_patch: 'reason' must be a non-empty string");
  }

  const input = args.input;
  if (typeof input !== "string" || !input.trim()) {
    throw new Error("memory_apply_patch: 'input' must be a non-empty string");
  }

  const memoryDir = resolveMemoryDir();
  ensureMemoryRepo(memoryDir);

  const ops = parsePatchOperations(memoryDir, input);
  if (ops.length === 0) {
    throw new Error("memory_apply_patch: no file operations found in patch");
  }

  // Staged state: nothing is written to disk until every op has validated.
  const pendingWrites = new Map<string, string>();
  const pendingDeletes = new Set<string>();
  const affectedPaths = new Set<string>();

  // Reads a file's current content, preferring staged content from earlier
  // ops in the same patch so sequential updates compose. Normalizes CRLF.
  const loadCurrentContent = async (
    relPath: string,
    sourcePathForErrors: string,
  ): Promise<string> => {
    const absPath = resolveMemoryPath(memoryDir, relPath);
    if (pendingDeletes.has(absPath) && !pendingWrites.has(absPath)) {
      throw new Error(
        `memory_apply_patch: file not found for update: ${sourcePathForErrors}`,
      );
    }

    const pending = pendingWrites.get(absPath);
    if (pending !== undefined) {
      return pending;
    }

    const content = await readFile(absPath, "utf8").catch((error) => {
      const message = error instanceof Error ? error.message : String(error);
      throw new Error(
        `memory_apply_patch: failed to read ${sourcePathForErrors}: ${message}`,
      );
    });

    return content.replace(/\r\n/g, "\n");
  };

  for (const op of ops) {
    if (op.kind === "add") {
      const absPath = resolveMemoryFilePath(memoryDir, op.targetLabel);
      if (pendingWrites.has(absPath)) {
        throw new Error(
          `memory_apply_patch: duplicate add/update target in patch: ${op.targetRelPath}`,
        );
      }
      // Adds may not clobber a file that already exists on disk.
      if (!(await isMissing(absPath))) {
        throw new Error(
          `memory_apply_patch: cannot add existing memory file: ${op.targetRelPath}`,
        );
      }

      const rawContent = op.contentLines.join("\n");
      // Ensures the new file carries valid frontmatter (synthesized if absent).
      const rendered = normalizeAddedContent(op.targetLabel, rawContent);

      pendingWrites.set(absPath, rendered);
      pendingDeletes.delete(absPath);
      affectedPaths.add(toRepoRelative(memoryDir, absPath));
      continue;
    }

    if (op.kind === "delete") {
      const absPath = resolveMemoryFilePath(memoryDir, op.targetLabel);
      // Existence + read_only check; throws if the file can't be deleted.
      await loadEditableMemoryFile(absPath, op.targetRelPath);
      pendingWrites.delete(absPath);
      pendingDeletes.add(absPath);
      affectedPaths.add(toRepoRelative(memoryDir, absPath));
      continue;
    }

    // op.kind === "update" (possibly with a move to a new target path).
    const sourceAbsPath = resolveMemoryFilePath(memoryDir, op.sourceLabel);
    const targetAbsPath = resolveMemoryFilePath(memoryDir, op.targetLabel);

    const currentContent = await loadCurrentContent(
      op.sourceRelPath,
      op.sourceRelPath,
    );
    const currentParsed = parseMemoryFile(currentContent);
    if (currentParsed.frontmatter.read_only === "true") {
      throw new Error(
        `memory_apply_patch: ${op.sourceRelPath} is read_only and cannot be modified`,
      );
    }

    // Apply hunks sequentially; each sees the output of the previous one.
    let nextContent = currentContent;
    for (const hunk of op.hunks) {
      nextContent = applyHunk(nextContent, hunk.lines, op.sourceRelPath);
    }

    // Re-parse to confirm the patched result is still a valid memory file
    // and did not flip itself to read_only.
    const validated = parseMemoryFile(nextContent);
    if (validated.frontmatter.read_only === "true") {
      throw new Error(
        `memory_apply_patch: ${op.targetRelPath} cannot be written with read_only=true`,
      );
    }

    pendingWrites.set(targetAbsPath, nextContent);
    pendingDeletes.delete(targetAbsPath);
    affectedPaths.add(toRepoRelative(memoryDir, targetAbsPath));

    // A move deletes the source unless it is already staged for deletion.
    if (sourceAbsPath !== targetAbsPath) {
      if (!pendingDeletes.has(sourceAbsPath)) {
        pendingWrites.delete(sourceAbsPath);
        pendingDeletes.add(sourceAbsPath);
      }
      affectedPaths.add(toRepoRelative(memoryDir, sourceAbsPath));
    }
  }

  // All ops validated — flush staged writes to disk.
  for (const [absPath, content] of pendingWrites.entries()) {
    await mkdir(dirname(absPath), { recursive: true });
    await writeFile(absPath, content, "utf8");
  }

  // Then apply staged deletions (writes win over deletes for the same path).
  for (const absPath of pendingDeletes) {
    if (pendingWrites.has(absPath)) continue;
    if (await isMissing(absPath)) continue;
    const stats = await stat(absPath);
    if (stats.isDirectory()) {
      await rm(absPath, { recursive: true, force: false });
    } else {
      await unlink(absPath);
    }
  }

  const pathspecs = Array.from(affectedPaths).filter((p) => p.length > 0);
  if (pathspecs.length === 0) {
    return { message: "memory_apply_patch completed with no changed paths." };
  }

  const commitResult = await commitAndPush(memoryDir, pathspecs, reason);
  if (!commitResult.committed) {
    // git status reported no diff (e.g. content identical to what was there).
    return {
      message:
        "memory_apply_patch made no effective changes; skipped commit and push.",
    };
  }

  return {
    message: `memory_apply_patch applied and pushed (${commitResult.sha?.slice(0, 7) ?? "unknown"}).`,
  };
}
|
||||
|
||||
function parsePatchOperations(
|
||||
memoryDir: string,
|
||||
input: string,
|
||||
): ParsedPatchOp[] {
|
||||
const lines = input.split(/\r?\n/);
|
||||
const beginIndex = lines.findIndex(
|
||||
(line) => line.trim() === "*** Begin Patch",
|
||||
);
|
||||
if (beginIndex !== 0) {
|
||||
throw new Error(
|
||||
'memory_apply_patch: patch must start with "*** Begin Patch"',
|
||||
);
|
||||
}
|
||||
|
||||
const endIndex = lines.findIndex((line) => line.trim() === "*** End Patch");
|
||||
if (endIndex === -1) {
|
||||
throw new Error('memory_apply_patch: patch must end with "*** End Patch"');
|
||||
}
|
||||
|
||||
for (let tail = endIndex + 1; tail < lines.length; tail += 1) {
|
||||
if ((lines[tail] ?? "").trim().length > 0) {
|
||||
throw new Error(
|
||||
"memory_apply_patch: unexpected content after *** End Patch",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const ops: ParsedPatchOp[] = [];
|
||||
let i = 1;
|
||||
|
||||
while (i < endIndex) {
|
||||
const line = lines[i]?.trim();
|
||||
if (!line) {
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (line.startsWith("*** Add File:")) {
|
||||
const rawPath = line.replace("*** Add File:", "").trim();
|
||||
const label = normalizeMemoryLabel(memoryDir, rawPath, "Add File path");
|
||||
const targetRelPath = `${label}.md`;
|
||||
|
||||
i += 1;
|
||||
const contentLines: string[] = [];
|
||||
while (i < endIndex) {
|
||||
const raw = lines[i];
|
||||
if (raw === undefined || raw.startsWith("*** ")) {
|
||||
break;
|
||||
}
|
||||
if (!raw.startsWith("+")) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: invalid Add File line at ${i + 1}: expected '+' prefix`,
|
||||
);
|
||||
}
|
||||
contentLines.push(raw.slice(1));
|
||||
i += 1;
|
||||
}
|
||||
|
||||
if (contentLines.length === 0) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: Add File for ${rawPath} must include at least one + line`,
|
||||
);
|
||||
}
|
||||
|
||||
ops.push({
|
||||
kind: "add",
|
||||
targetLabel: label,
|
||||
targetRelPath,
|
||||
contentLines,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
if (line.startsWith("*** Update File:")) {
|
||||
const rawSourcePath = line.replace("*** Update File:", "").trim();
|
||||
const sourceLabel = normalizeMemoryLabel(
|
||||
memoryDir,
|
||||
rawSourcePath,
|
||||
"Update File path",
|
||||
);
|
||||
let targetLabel = sourceLabel;
|
||||
|
||||
i += 1;
|
||||
if (i < endIndex) {
|
||||
const moveLine = lines[i];
|
||||
if (moveLine?.startsWith("*** Move to:")) {
|
||||
const rawTargetPath = moveLine.replace("*** Move to:", "").trim();
|
||||
targetLabel = normalizeMemoryLabel(
|
||||
memoryDir,
|
||||
rawTargetPath,
|
||||
"Move to path",
|
||||
);
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
|
||||
const hunks: Hunk[] = [];
|
||||
while (i < endIndex) {
|
||||
const hLine = lines[i];
|
||||
if (hLine === undefined || hLine.startsWith("*** ")) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (!hLine.startsWith("@@")) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: invalid Update File body at ${i + 1}: expected '@@' hunk header`,
|
||||
);
|
||||
}
|
||||
|
||||
i += 1;
|
||||
const hunkLines: string[] = [];
|
||||
while (i < endIndex) {
|
||||
const l = lines[i];
|
||||
if (l === undefined || l.startsWith("@@") || l.startsWith("*** ")) {
|
||||
break;
|
||||
}
|
||||
if (l === "*** End of File") {
|
||||
i += 1;
|
||||
break;
|
||||
}
|
||||
if (
|
||||
l.startsWith(" ") ||
|
||||
l.startsWith("+") ||
|
||||
l.startsWith("-") ||
|
||||
l === ""
|
||||
) {
|
||||
hunkLines.push(l);
|
||||
} else {
|
||||
throw new Error(
|
||||
`memory_apply_patch: invalid hunk line at ${i + 1}: expected one of ' ', '+', '-'`,
|
||||
);
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
hunks.push({ lines: hunkLines });
|
||||
}
|
||||
|
||||
if (hunks.length === 0) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: Update File for ${rawSourcePath} has no hunks`,
|
||||
);
|
||||
}
|
||||
|
||||
ops.push({
|
||||
kind: "update",
|
||||
sourceLabel,
|
||||
sourceRelPath: `${sourceLabel}.md`,
|
||||
targetLabel,
|
||||
targetRelPath: `${targetLabel}.md`,
|
||||
hunks,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
if (line.startsWith("*** Delete File:")) {
|
||||
const rawPath = line.replace("*** Delete File:", "").trim();
|
||||
const label = normalizeMemoryLabel(
|
||||
memoryDir,
|
||||
rawPath,
|
||||
"Delete File path",
|
||||
);
|
||||
ops.push({
|
||||
kind: "delete",
|
||||
targetLabel: label,
|
||||
targetRelPath: `${label}.md`,
|
||||
});
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
`memory_apply_patch: unknown patch directive at line ${i + 1}: ${line}`,
|
||||
);
|
||||
}
|
||||
|
||||
return ops;
|
||||
}
|
||||
|
||||
function normalizeAddedContent(label: string, rawContent: string): string {
|
||||
try {
|
||||
const parsed = parseMemoryFile(rawContent);
|
||||
return renderMemoryFile(parsed.frontmatter, parsed.body);
|
||||
} catch {
|
||||
return renderMemoryFile(
|
||||
{
|
||||
description: `Memory block ${label}`,
|
||||
},
|
||||
rawContent,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function resolveMemoryDir(): string {
|
||||
const direct = process.env.MEMORY_DIR || process.env.LETTA_MEMORY_DIR;
|
||||
if (direct && direct.trim().length > 0) {
|
||||
return resolve(direct);
|
||||
}
|
||||
|
||||
const contextAgentId = (() => {
|
||||
try {
|
||||
return getCurrentAgentId().trim();
|
||||
} catch {
|
||||
return "";
|
||||
}
|
||||
})();
|
||||
|
||||
const agentId =
|
||||
contextAgentId ||
|
||||
(process.env.AGENT_ID || process.env.LETTA_AGENT_ID || "").trim();
|
||||
if (agentId && agentId.trim().length > 0) {
|
||||
return resolve(homedir(), ".letta", "agents", agentId, "memory");
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
"memory_apply_patch: unable to resolve memory directory. Ensure MEMORY_DIR (or AGENT_ID) is available.",
|
||||
);
|
||||
}
|
||||
|
||||
function ensureMemoryRepo(memoryDir: string): void {
|
||||
if (!existsSync(memoryDir)) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: memory directory does not exist: ${memoryDir}`,
|
||||
);
|
||||
}
|
||||
if (!existsSync(resolve(memoryDir, ".git"))) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: ${memoryDir} is not a git repository. This tool requires a git-backed memory filesystem.`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeMemoryLabel(
|
||||
memoryDir: string,
|
||||
inputPath: string,
|
||||
fieldName: string,
|
||||
): string {
|
||||
const raw = inputPath.trim();
|
||||
if (!raw) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: '${fieldName}' must be a non-empty string`,
|
||||
);
|
||||
}
|
||||
|
||||
if (raw.startsWith("~/") || raw.startsWith("$HOME/")) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: '${fieldName}' must be a memory-relative file path, not a home-relative filesystem path`,
|
||||
);
|
||||
}
|
||||
|
||||
const isWindowsAbsolute = /^[a-zA-Z]:[\\/]/.test(raw);
|
||||
if (isAbsolute(raw) || isWindowsAbsolute) {
|
||||
const absolutePath = resolve(raw);
|
||||
const relToMemory = relative(memoryDir, absolutePath);
|
||||
|
||||
if (
|
||||
relToMemory &&
|
||||
!relToMemory.startsWith("..") &&
|
||||
!isAbsolute(relToMemory)
|
||||
) {
|
||||
return normalizeRelativeMemoryLabel(relToMemory, fieldName);
|
||||
}
|
||||
|
||||
throw new Error(memoryPrefixError(memoryDir));
|
||||
}
|
||||
|
||||
return normalizeRelativeMemoryLabel(raw, fieldName);
|
||||
}
|
||||
|
||||
function normalizeRelativeMemoryLabel(
|
||||
inputPath: string,
|
||||
fieldName: string,
|
||||
): string {
|
||||
const raw = inputPath.trim();
|
||||
if (!raw) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: '${fieldName}' must be a non-empty string`,
|
||||
);
|
||||
}
|
||||
|
||||
const normalized = raw.replace(/\\/g, "/");
|
||||
if (normalized.startsWith("/")) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: '${fieldName}' must be a relative path like system/contacts.md`,
|
||||
);
|
||||
}
|
||||
|
||||
let label = normalized;
|
||||
label = label.replace(/^memory\//, "");
|
||||
label = label.replace(/\.md$/, "");
|
||||
|
||||
if (!label) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: '${fieldName}' resolves to an empty memory label`,
|
||||
);
|
||||
}
|
||||
|
||||
const segments = label.split("/").filter(Boolean);
|
||||
if (segments.length === 0) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: '${fieldName}' resolves to an empty memory label`,
|
||||
);
|
||||
}
|
||||
|
||||
for (const segment of segments) {
|
||||
if (segment === "." || segment === "..") {
|
||||
throw new Error(
|
||||
`memory_apply_patch: '${fieldName}' contains invalid path traversal segment`,
|
||||
);
|
||||
}
|
||||
if (segment.includes("\0")) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: '${fieldName}' contains invalid null bytes`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return segments.join("/");
|
||||
}
|
||||
|
||||
function memoryPrefixError(memoryDir: string): string {
|
||||
return `The memory_apply_patch tool can only be used to modify files in {${memoryDir}} or provided as a relative path`;
|
||||
}
|
||||
|
||||
function resolveMemoryPath(memoryDir: string, path: string): string {
|
||||
const absolute = resolve(memoryDir, path);
|
||||
const rel = relative(memoryDir, absolute);
|
||||
if (rel.startsWith("..") || isAbsolute(rel)) {
|
||||
throw new Error(
|
||||
"memory_apply_patch: resolved path escapes memory directory",
|
||||
);
|
||||
}
|
||||
return absolute;
|
||||
}
|
||||
|
||||
function resolveMemoryFilePath(memoryDir: string, label: string): string {
|
||||
return resolveMemoryPath(memoryDir, `${label}.md`);
|
||||
}
|
||||
|
||||
function toRepoRelative(memoryDir: string, absolutePath: string): string {
|
||||
const rel = relative(memoryDir, absolutePath);
|
||||
if (!rel || rel.startsWith("..") || isAbsolute(rel)) {
|
||||
throw new Error("memory_apply_patch: path is outside memory repository");
|
||||
}
|
||||
return rel.replace(/\\/g, "/");
|
||||
}
|
||||
|
||||
async function loadEditableMemoryFile(
|
||||
filePath: string,
|
||||
sourcePath: string,
|
||||
): Promise<ParsedMemoryFile> {
|
||||
const content = await readFile(filePath, "utf8").catch((error) => {
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
throw new Error(
|
||||
`memory_apply_patch: failed to read ${sourcePath}: ${message}`,
|
||||
);
|
||||
});
|
||||
|
||||
const parsed = parseMemoryFile(content);
|
||||
if (parsed.frontmatter.read_only === "true") {
|
||||
throw new Error(
|
||||
`memory_apply_patch: ${sourcePath} is read_only and cannot be modified`,
|
||||
);
|
||||
}
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function parseMemoryFile(content: string): ParsedMemoryFile {
|
||||
const match = content.match(/^---\r?\n([\s\S]*?)\r?\n---\r?\n?([\s\S]*)$/);
|
||||
if (!match) {
|
||||
throw new Error(
|
||||
"memory_apply_patch: target file is missing required frontmatter",
|
||||
);
|
||||
}
|
||||
|
||||
const frontmatterText = match[1] ?? "";
|
||||
const body = match[2] ?? "";
|
||||
|
||||
let description: string | undefined;
|
||||
let readOnly: string | undefined;
|
||||
|
||||
for (const line of frontmatterText.split(/\r?\n/)) {
|
||||
const idx = line.indexOf(":");
|
||||
if (idx <= 0) continue;
|
||||
|
||||
const key = line.slice(0, idx).trim();
|
||||
const value = line.slice(idx + 1).trim();
|
||||
|
||||
if (key === "description") {
|
||||
description = value;
|
||||
} else if (key === "read_only") {
|
||||
readOnly = value;
|
||||
}
|
||||
}
|
||||
|
||||
if (!description || !description.trim()) {
|
||||
throw new Error(
|
||||
"memory_apply_patch: target file frontmatter is missing 'description'",
|
||||
);
|
||||
}
|
||||
|
||||
return {
|
||||
frontmatter: {
|
||||
description,
|
||||
...(readOnly !== undefined ? { read_only: readOnly } : {}),
|
||||
},
|
||||
body,
|
||||
};
|
||||
}
|
||||
|
||||
function renderMemoryFile(
|
||||
frontmatter: { description: string; read_only?: string },
|
||||
body: string,
|
||||
): string {
|
||||
const description = frontmatter.description.trim();
|
||||
if (!description) {
|
||||
throw new Error("memory_apply_patch: 'description' must not be empty");
|
||||
}
|
||||
|
||||
const lines = [
|
||||
"---",
|
||||
`description: ${sanitizeFrontmatterValue(description)}`,
|
||||
];
|
||||
|
||||
if (frontmatter.read_only !== undefined) {
|
||||
lines.push(`read_only: ${frontmatter.read_only}`);
|
||||
}
|
||||
|
||||
lines.push("---");
|
||||
|
||||
const header = lines.join("\n");
|
||||
if (!body) {
|
||||
return `${header}\n`;
|
||||
}
|
||||
return `${header}\n${body}`;
|
||||
}
|
||||
|
||||
function sanitizeFrontmatterValue(value: string): string {
|
||||
return value.replace(/\r?\n/g, " ").trim();
|
||||
}
|
||||
|
||||
async function runGit(
|
||||
memoryDir: string,
|
||||
args: string[],
|
||||
): Promise<{ stdout: string; stderr: string }> {
|
||||
try {
|
||||
const result = await execFile("git", args, {
|
||||
cwd: memoryDir,
|
||||
maxBuffer: 10 * 1024 * 1024,
|
||||
env: {
|
||||
...process.env,
|
||||
PAGER: "cat",
|
||||
GIT_PAGER: "cat",
|
||||
},
|
||||
});
|
||||
|
||||
return {
|
||||
stdout: result.stdout?.toString() ?? "",
|
||||
stderr: result.stderr?.toString() ?? "",
|
||||
};
|
||||
} catch (error) {
|
||||
const stderr =
|
||||
typeof error === "object" && error !== null && "stderr" in error
|
||||
? String((error as { stderr?: string }).stderr ?? "")
|
||||
: "";
|
||||
const stdout =
|
||||
typeof error === "object" && error !== null && "stdout" in error
|
||||
? String((error as { stdout?: string }).stdout ?? "")
|
||||
: "";
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
|
||||
throw new Error(
|
||||
`git ${args.join(" ")} failed: ${stderr || stdout || message}`.trim(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Stages the given pathspecs, commits them with the agent as author, and
 * pushes to the remote. Returns { committed: false } when git reports no
 * effective change for the pathspecs.
 *
 * @param memoryDir - Git-backed memory repository root.
 * @param pathspecs - Repo-relative paths limiting add/status.
 * @param reason - Commit message.
 * @throws Error if any git step fails; a push failure after a successful
 *         commit is reported with the commit sha in the message.
 */
async function commitAndPush(
  memoryDir: string,
  pathspecs: string[],
  reason: string,
): Promise<{ committed: boolean; sha?: string }> {
  // Stage additions, modifications, AND deletions for the affected paths.
  await runGit(memoryDir, ["add", "-A", "--", ...pathspecs]);

  // If nothing actually changed, skip the commit entirely.
  const status = await runGit(memoryDir, [
    "status",
    "--porcelain",
    "--",
    ...pathspecs,
  ]);
  if (!status.stdout.trim()) {
    return { committed: false };
  }

  const { agentId, agentName } = await getAgentIdentity();
  const authorName = agentName.trim() || agentId;
  const authorEmail = `${agentId}@letta.com`;

  // Set author identity per-invocation via -c rather than mutating repo config.
  await runGit(memoryDir, [
    "-c",
    `user.name=${authorName}`,
    "-c",
    `user.email=${authorEmail}`,
    "commit",
    "-m",
    reason,
  ]);

  const head = await runGit(memoryDir, ["rev-parse", "HEAD"]);
  const sha = head.stdout.trim();

  try {
    await runGit(memoryDir, ["push"]);
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    // The commit exists locally even though the push failed — say so.
    throw new Error(
      `Memory changes were committed (${sha.slice(0, 7)}) but push failed: ${message}`,
    );
  }

  return {
    committed: true,
    sha,
  };
}
|
||||
|
||||
async function isMissing(filePath: string): Promise<boolean> {
|
||||
try {
|
||||
await access(filePath);
|
||||
return false;
|
||||
} catch {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
function applyHunk(
|
||||
content: string,
|
||||
hunkLines: string[],
|
||||
filePath: string,
|
||||
): string {
|
||||
const { oldChunk, newChunk } = buildOldNewChunks(hunkLines);
|
||||
if (oldChunk.length === 0) {
|
||||
throw new Error(
|
||||
`memory_apply_patch: failed to apply hunk to ${filePath}: hunk has no anchor/context`,
|
||||
);
|
||||
}
|
||||
|
||||
const index = content.indexOf(oldChunk);
|
||||
if (index !== -1) {
|
||||
return (
|
||||
content.slice(0, index) +
|
||||
newChunk +
|
||||
content.slice(index + oldChunk.length)
|
||||
);
|
||||
}
|
||||
|
||||
if (oldChunk.endsWith("\n")) {
|
||||
const oldWithoutTrailingNewline = oldChunk.slice(0, -1);
|
||||
const indexWithoutTrailingNewline = content.indexOf(
|
||||
oldWithoutTrailingNewline,
|
||||
);
|
||||
if (indexWithoutTrailingNewline !== -1) {
|
||||
const replacement = newChunk.endsWith("\n")
|
||||
? newChunk.slice(0, -1)
|
||||
: newChunk;
|
||||
return (
|
||||
content.slice(0, indexWithoutTrailingNewline) +
|
||||
replacement +
|
||||
content.slice(
|
||||
indexWithoutTrailingNewline + oldWithoutTrailingNewline.length,
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
`memory_apply_patch: failed to apply hunk to ${filePath}: context not found`,
|
||||
);
|
||||
}
|
||||
|
||||
function buildOldNewChunks(lines: string[]): {
|
||||
oldChunk: string;
|
||||
newChunk: string;
|
||||
} {
|
||||
const oldParts: string[] = [];
|
||||
const newParts: string[] = [];
|
||||
|
||||
for (const raw of lines) {
|
||||
if (raw === "") {
|
||||
oldParts.push("\n");
|
||||
newParts.push("\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
const prefix = raw[0];
|
||||
const text = raw.slice(1);
|
||||
|
||||
if (prefix === " ") {
|
||||
oldParts.push(`${text}\n`);
|
||||
newParts.push(`${text}\n`);
|
||||
} else if (prefix === "-") {
|
||||
oldParts.push(`${text}\n`);
|
||||
} else if (prefix === "+") {
|
||||
newParts.push(`${text}\n`);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
oldChunk: oldParts.join(""),
|
||||
newChunk: newParts.join(""),
|
||||
};
|
||||
}
|
||||
@@ -35,6 +35,7 @@ const FILE_MODIFYING_TOOLS = new Set([
|
||||
"ShellCommand",
|
||||
"shell_command",
|
||||
"apply_patch",
|
||||
"memory_apply_patch",
|
||||
// Gemini toolset
|
||||
"Replace",
|
||||
"replace",
|
||||
@@ -120,7 +121,7 @@ export const OPENAI_DEFAULT_TOOLS: ToolName[] = [
|
||||
// TODO(codex-parity): add once request_user_input tool exists in raw codex path.
|
||||
// "request_user_input",
|
||||
"apply_patch",
|
||||
"memory",
|
||||
"memory_apply_patch",
|
||||
"update_plan",
|
||||
"view_image",
|
||||
];
|
||||
@@ -146,7 +147,7 @@ export const OPENAI_PASCAL_TOOLS: ToolName[] = [
|
||||
"AskUserQuestion",
|
||||
"EnterPlanMode",
|
||||
"ExitPlanMode",
|
||||
"memory",
|
||||
"memory_apply_patch",
|
||||
"Task",
|
||||
"TaskOutput",
|
||||
"TaskStop",
|
||||
@@ -193,6 +194,7 @@ const TOOL_PERMISSIONS: Record<ToolName, { requiresApproval: boolean }> = {
|
||||
TaskStop: { requiresApproval: true },
|
||||
LS: { requiresApproval: false },
|
||||
memory: { requiresApproval: true },
|
||||
memory_apply_patch: { requiresApproval: true },
|
||||
MultiEdit: { requiresApproval: true },
|
||||
Read: { requiresApproval: false },
|
||||
view_image: { requiresApproval: false },
|
||||
|
||||
16
src/tools/schemas/MemoryApplyPatch.json
Normal file
16
src/tools/schemas/MemoryApplyPatch.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"reason": {
|
||||
"type": "string",
|
||||
"description": "Required commit message for this memory patch change. Used as the git commit message."
|
||||
},
|
||||
"input": {
|
||||
"type": "string",
|
||||
"description": "The entire contents of the apply_patch command, constrained to memory files under MEMORY_DIR."
|
||||
}
|
||||
},
|
||||
"required": ["reason", "input"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
@@ -15,6 +15,7 @@ import ListDirCodexDescription from "./descriptions/ListDirCodex.md";
|
||||
import ListDirectoryGeminiDescription from "./descriptions/ListDirectoryGemini.md";
|
||||
import LSDescription from "./descriptions/LS.md";
|
||||
import MemoryDescription from "./descriptions/Memory.md";
|
||||
import MemoryApplyPatchDescription from "./descriptions/MemoryApplyPatch.md";
|
||||
import MultiEditDescription from "./descriptions/MultiEdit.md";
|
||||
import ReadDescription from "./descriptions/Read.md";
|
||||
import ReadFileCodexDescription from "./descriptions/ReadFileCodex.md";
|
||||
@@ -53,6 +54,7 @@ import { list_dir } from "./impl/ListDirCodex";
|
||||
import { list_directory } from "./impl/ListDirectoryGemini";
|
||||
import { ls } from "./impl/LS";
|
||||
import { memory } from "./impl/Memory";
|
||||
import { memory_apply_patch } from "./impl/MemoryApplyPatch";
|
||||
import { multi_edit } from "./impl/MultiEdit";
|
||||
import { read } from "./impl/Read";
|
||||
import { read_file } from "./impl/ReadFileCodex";
|
||||
@@ -91,6 +93,7 @@ import ListDirCodexSchema from "./schemas/ListDirCodex.json";
|
||||
import ListDirectoryGeminiSchema from "./schemas/ListDirectoryGemini.json";
|
||||
import LSSchema from "./schemas/LS.json";
|
||||
import MemorySchema from "./schemas/Memory.json";
|
||||
import MemoryApplyPatchSchema from "./schemas/MemoryApplyPatch.json";
|
||||
import MultiEditSchema from "./schemas/MultiEdit.json";
|
||||
import ReadSchema from "./schemas/Read.json";
|
||||
import ReadFileCodexSchema from "./schemas/ReadFileCodex.json";
|
||||
@@ -187,6 +190,11 @@ const toolDefinitions = {
|
||||
description: MemoryDescription.trim(),
|
||||
impl: memory as unknown as ToolImplementation,
|
||||
},
|
||||
memory_apply_patch: {
|
||||
schema: MemoryApplyPatchSchema,
|
||||
description: MemoryApplyPatchDescription.trim(),
|
||||
impl: memory_apply_patch as unknown as ToolImplementation,
|
||||
},
|
||||
MultiEdit: {
|
||||
schema: MultiEditSchema,
|
||||
description: MultiEditDescription.trim(),
|
||||
|
||||
@@ -50,27 +50,23 @@ export function deriveToolsetFromModel(
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures the correct memory tool is attached to the agent based on the model.
|
||||
* - OpenAI/Codex models use memory_apply_patch
|
||||
* - Claude/Gemini models use memory
|
||||
* Ensures the server-side memory tool is attached to the agent.
|
||||
* Client toolsets may use memory_apply_patch, but server-side base memory tool remains memory.
|
||||
*
|
||||
* This is a server-side tool swap - client tools are passed via client_tools per-request.
|
||||
*
|
||||
* @param agentId - The agent ID to update
|
||||
* @param modelIdentifier - Model handle to determine which memory tool to use
|
||||
* @param useMemoryPatch - Optional override: true = use memory_apply_patch, false = use memory
|
||||
* @param modelIdentifier - Model handle (kept for API compatibility)
|
||||
* @param useMemoryPatch - Unused compatibility parameter
|
||||
*/
|
||||
export async function ensureCorrectMemoryTool(
|
||||
agentId: string,
|
||||
modelIdentifier: string,
|
||||
useMemoryPatch?: boolean,
|
||||
): Promise<void> {
|
||||
const resolvedModel = resolveModel(modelIdentifier) ?? modelIdentifier;
|
||||
void resolveModel(modelIdentifier);
|
||||
void useMemoryPatch;
|
||||
const client = await getClient();
|
||||
const shouldUsePatch =
|
||||
useMemoryPatch !== undefined
|
||||
? useMemoryPatch
|
||||
: isOpenAIModel(resolvedModel);
|
||||
|
||||
try {
|
||||
// Need full agent state for tool_rules, so use retrieve with include
|
||||
@@ -89,8 +85,8 @@ export async function ensureCorrectMemoryTool(
|
||||
}
|
||||
|
||||
// Determine which memory tool we want
|
||||
// Only OpenAI (Codex) uses memory_apply_patch; Claude and Gemini use memory
|
||||
const desiredMemoryTool = shouldUsePatch ? "memory_apply_patch" : "memory";
|
||||
// OpenAI/Codex models use client-side memory_apply_patch now; keep server memory tool as "memory" for all models
|
||||
const desiredMemoryTool = "memory";
|
||||
const otherMemoryTool =
|
||||
desiredMemoryTool === "memory" ? "memory_apply_patch" : "memory";
|
||||
|
||||
@@ -184,9 +180,8 @@ export async function reattachMemoryTool(
|
||||
agentId: string,
|
||||
modelIdentifier: string,
|
||||
): Promise<void> {
|
||||
const resolvedModel = resolveModel(modelIdentifier) ?? modelIdentifier;
|
||||
void resolveModel(modelIdentifier);
|
||||
const client = await getClient();
|
||||
const shouldUsePatch = isOpenAIModel(resolvedModel);
|
||||
|
||||
try {
|
||||
const agentWithTools = await client.agents.retrieve(agentId, {
|
||||
@@ -196,7 +191,7 @@ export async function reattachMemoryTool(
|
||||
const mapByName = new Map(currentTools.map((t) => [t.name, t.id]));
|
||||
|
||||
// Determine which memory tool we want
|
||||
const desiredMemoryTool = shouldUsePatch ? "memory_apply_patch" : "memory";
|
||||
const desiredMemoryTool = "memory";
|
||||
|
||||
// Already has the tool?
|
||||
if (mapByName.has(desiredMemoryTool)) {
|
||||
@@ -303,8 +298,7 @@ export async function forceToolsetSwitch(
|
||||
modelForLoading = "anthropic/claude-sonnet-4";
|
||||
}
|
||||
|
||||
// Ensure base memory tool is correct for the toolset
|
||||
// Codex uses memory_apply_patch; Claude and Gemini use memory
|
||||
// Ensure base server memory tool is correct for the toolset
|
||||
const useMemoryPatch =
|
||||
toolsetName === "codex" || toolsetName === "codex_snake";
|
||||
await ensureCorrectMemoryTool(agentId, modelForLoading, useMemoryPatch);
|
||||
@@ -345,7 +339,7 @@ export async function switchToolsetForModel(
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure base memory tool is correct for the model
|
||||
// Ensure base server memory tool is attached
|
||||
await ensureCorrectMemoryTool(agentId, resolvedModel);
|
||||
|
||||
const toolsetName = deriveToolsetFromModel(resolvedModel);
|
||||
|
||||
@@ -136,6 +136,7 @@ export interface DeviceStatus {
|
||||
current_available_skills: AvailableSkillSummary[];
|
||||
background_processes: BackgroundProcessSummary[];
|
||||
pending_control_requests: PendingControlRequest[];
|
||||
memory_directory: string | null;
|
||||
}
|
||||
|
||||
export type LoopStatus =
|
||||
@@ -363,6 +364,8 @@ export interface TerminalSpawnCommand {
|
||||
terminal_id: string;
|
||||
cols: number;
|
||||
rows: number;
|
||||
/** Agent's current working directory. Falls back to bootWorkingDirectory if absent. */
|
||||
cwd?: string;
|
||||
}
|
||||
|
||||
export interface TerminalInputCommand {
|
||||
@@ -393,6 +396,42 @@ export interface SearchFilesCommand {
|
||||
max_results?: number;
|
||||
}
|
||||
|
||||
export interface ListInDirectoryCommand {
|
||||
type: "list_in_directory";
|
||||
/** Absolute path to list entries in. */
|
||||
path: string;
|
||||
/** When true, response includes non-directory entries in `files`. */
|
||||
include_files?: boolean;
|
||||
/** Max entries to return (folders + files combined). */
|
||||
limit?: number;
|
||||
/** Number of entries to skip before returning. */
|
||||
offset?: number;
|
||||
}
|
||||
|
||||
export interface ReadFileCommand {
|
||||
type: "read_file";
|
||||
/** Absolute path to the file to read. */
|
||||
path: string;
|
||||
/** Echoed back in the response for request correlation. */
|
||||
request_id: string;
|
||||
}
|
||||
|
||||
export interface ListMemoryCommand {
|
||||
type: "list_memory";
|
||||
/** Echoed back in every response chunk for request correlation. */
|
||||
request_id: string;
|
||||
/** The agent whose memory to list. */
|
||||
agent_id: string;
|
||||
}
|
||||
|
||||
export interface EnableMemfsCommand {
|
||||
type: "enable_memfs";
|
||||
/** Echoed back in the response for request correlation. */
|
||||
request_id: string;
|
||||
/** The agent to enable memfs for. */
|
||||
agent_id: string;
|
||||
}
|
||||
|
||||
export type WsProtocolCommand =
|
||||
| InputCommand
|
||||
| ChangeDeviceStateCommand
|
||||
@@ -402,7 +441,11 @@ export type WsProtocolCommand =
|
||||
| TerminalInputCommand
|
||||
| TerminalResizeCommand
|
||||
| TerminalKillCommand
|
||||
| SearchFilesCommand;
|
||||
| SearchFilesCommand
|
||||
| ListInDirectoryCommand
|
||||
| ReadFileCommand
|
||||
| ListMemoryCommand
|
||||
| EnableMemfsCommand;
|
||||
|
||||
export type WsProtocolMessage =
|
||||
| DeviceStatusUpdateMessage
|
||||
|
||||
@@ -57,11 +57,18 @@ import {
|
||||
stashRecoveredApprovalInterrupts,
|
||||
} from "./interrupts";
|
||||
import {
|
||||
getConversationPermissionModeState,
|
||||
getOrCreateConversationPermissionModeStateRef,
|
||||
loadPersistedPermissionModeMap,
|
||||
setConversationPermissionModeState,
|
||||
persistPermissionModeMapForRuntime,
|
||||
} from "./permissionMode";
|
||||
import { isSearchFilesCommand, parseServerMessage } from "./protocol-inbound";
|
||||
import {
|
||||
isEnableMemfsCommand,
|
||||
isListInDirectoryCommand,
|
||||
isListMemoryCommand,
|
||||
isReadFileCommand,
|
||||
isSearchFilesCommand,
|
||||
parseServerMessage,
|
||||
} from "./protocol-inbound";
|
||||
import {
|
||||
buildDeviceStatus,
|
||||
buildLoopStatus,
|
||||
@@ -141,32 +148,30 @@ function handleModeChange(
|
||||
try {
|
||||
const agentId = scope?.agent_id ?? null;
|
||||
const conversationId = scope?.conversation_id ?? "default";
|
||||
const current = getConversationPermissionModeState(
|
||||
const current = getOrCreateConversationPermissionModeStateRef(
|
||||
runtime,
|
||||
agentId,
|
||||
conversationId,
|
||||
);
|
||||
|
||||
const next = { ...current };
|
||||
|
||||
// Track previous mode so ExitPlanMode can restore it
|
||||
if (msg.mode === "plan" && current.mode !== "plan") {
|
||||
next.modeBeforePlan = current.mode;
|
||||
current.modeBeforePlan = current.mode;
|
||||
}
|
||||
next.mode = msg.mode;
|
||||
current.mode = msg.mode;
|
||||
|
||||
// Generate plan file path when entering plan mode
|
||||
if (msg.mode === "plan" && !current.planFilePath) {
|
||||
next.planFilePath = generatePlanFilePath();
|
||||
current.planFilePath = generatePlanFilePath();
|
||||
}
|
||||
|
||||
// Clear plan-related state when leaving plan mode
|
||||
if (msg.mode !== "plan") {
|
||||
next.planFilePath = null;
|
||||
next.modeBeforePlan = null;
|
||||
current.planFilePath = null;
|
||||
current.modeBeforePlan = null;
|
||||
}
|
||||
|
||||
setConversationPermissionModeState(runtime, agentId, conversationId, next);
|
||||
persistPermissionModeMapForRuntime(runtime);
|
||||
|
||||
emitDeviceStatusUpdate(socket, runtime, scope);
|
||||
|
||||
@@ -1016,9 +1021,257 @@ async function connectWithRetry(
|
||||
return;
|
||||
}
|
||||
|
||||
// ── Directory listing (no runtime scope required) ──────────────────
|
||||
if (isListInDirectoryCommand(parsed)) {
|
||||
void (async () => {
|
||||
try {
|
||||
const { readdir } = await import("node:fs/promises");
|
||||
const entries = await readdir(parsed.path, { withFileTypes: true });
|
||||
|
||||
// Filter out OS/VCS noise before sorting
|
||||
const IGNORED_NAMES = new Set([
|
||||
".DS_Store",
|
||||
".git",
|
||||
".gitignore",
|
||||
"Thumbs.db",
|
||||
]);
|
||||
const sortedEntries = entries
|
||||
.filter((e) => !IGNORED_NAMES.has(e.name))
|
||||
.sort((a, b) => a.name.localeCompare(b.name));
|
||||
|
||||
const allFolders: string[] = [];
|
||||
const allFiles: string[] = [];
|
||||
for (const e of sortedEntries) {
|
||||
if (e.isDirectory()) {
|
||||
allFolders.push(e.name);
|
||||
} else if (parsed.include_files) {
|
||||
allFiles.push(e.name);
|
||||
}
|
||||
}
|
||||
|
||||
const total = allFolders.length + allFiles.length;
|
||||
const offset = parsed.offset ?? 0;
|
||||
const limit = parsed.limit ?? total;
|
||||
|
||||
// Paginate over the combined [folders, files] list
|
||||
const combined = [...allFolders, ...allFiles];
|
||||
const page = combined.slice(offset, offset + limit);
|
||||
const folders = page.filter((name) => allFolders.includes(name));
|
||||
const files = page.filter((name) => allFiles.includes(name));
|
||||
|
||||
const response: Record<string, unknown> = {
|
||||
type: "list_in_directory_response",
|
||||
path: parsed.path,
|
||||
folders,
|
||||
hasMore: offset + limit < total,
|
||||
total,
|
||||
success: true,
|
||||
};
|
||||
if (parsed.include_files) {
|
||||
response.files = files;
|
||||
}
|
||||
socket.send(JSON.stringify(response));
|
||||
} catch (err) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "list_in_directory_response",
|
||||
path: parsed.path,
|
||||
folders: [],
|
||||
hasMore: false,
|
||||
success: false,
|
||||
error:
|
||||
err instanceof Error ? err.message : "Failed to list directory",
|
||||
}),
|
||||
);
|
||||
}
|
||||
})();
|
||||
return;
|
||||
}
|
||||
|
||||
// ── File reading (no runtime scope required) ─────────────────────
|
||||
if (isReadFileCommand(parsed)) {
|
||||
void (async () => {
|
||||
try {
|
||||
const { readFile } = await import("node:fs/promises");
|
||||
const content = await readFile(parsed.path, "utf-8");
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "read_file_response",
|
||||
request_id: parsed.request_id,
|
||||
path: parsed.path,
|
||||
content,
|
||||
success: true,
|
||||
}),
|
||||
);
|
||||
} catch (err) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "read_file_response",
|
||||
request_id: parsed.request_id,
|
||||
path: parsed.path,
|
||||
content: null,
|
||||
success: false,
|
||||
error: err instanceof Error ? err.message : "Failed to read file",
|
||||
}),
|
||||
);
|
||||
}
|
||||
})();
|
||||
return;
|
||||
}
|
||||
|
||||
// ── Memory index (no runtime scope required) ─────────────────────
|
||||
if (isListMemoryCommand(parsed)) {
|
||||
void (async () => {
|
||||
try {
|
||||
const { getMemoryFilesystemRoot } = await import(
|
||||
"../../agent/memoryFilesystem"
|
||||
);
|
||||
const { scanMemoryFilesystem, getFileNodes, readFileContent } =
|
||||
await import("../../agent/memoryScanner");
|
||||
const { parseFrontmatter } = await import("../../utils/frontmatter");
|
||||
|
||||
const { existsSync } = await import("node:fs");
|
||||
const { join } = await import("node:path");
|
||||
|
||||
const memoryRoot = getMemoryFilesystemRoot(parsed.agent_id);
|
||||
|
||||
// If the memory directory doesn't have a git repo, memfs
|
||||
// hasn't been initialized — tell the UI so it can show the
|
||||
// enable button instead of an empty file list.
|
||||
const memfsInitialized = existsSync(join(memoryRoot, ".git"));
|
||||
|
||||
if (!memfsInitialized) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "list_memory_response",
|
||||
request_id: parsed.request_id,
|
||||
entries: [],
|
||||
done: true,
|
||||
total: 0,
|
||||
success: true,
|
||||
memfs_initialized: false,
|
||||
}),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const treeNodes = scanMemoryFilesystem(memoryRoot);
|
||||
const fileNodes = getFileNodes(treeNodes).filter((n) =>
|
||||
n.name.endsWith(".md"),
|
||||
);
|
||||
|
||||
const CHUNK_SIZE = 5;
|
||||
const total = fileNodes.length;
|
||||
|
||||
for (let i = 0; i < total; i += CHUNK_SIZE) {
|
||||
const chunk = fileNodes.slice(i, i + CHUNK_SIZE);
|
||||
const entries = chunk.map((node) => {
|
||||
const raw = readFileContent(node.fullPath);
|
||||
const { frontmatter, body } = parseFrontmatter(raw);
|
||||
const desc = frontmatter.description;
|
||||
return {
|
||||
relative_path: node.relativePath,
|
||||
is_system:
|
||||
node.relativePath.startsWith("system/") ||
|
||||
node.relativePath.startsWith("system\\"),
|
||||
description: typeof desc === "string" ? desc : null,
|
||||
content: body,
|
||||
size: body.length,
|
||||
};
|
||||
});
|
||||
|
||||
const done = i + CHUNK_SIZE >= total;
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "list_memory_response",
|
||||
request_id: parsed.request_id,
|
||||
entries,
|
||||
done,
|
||||
total,
|
||||
success: true,
|
||||
memfs_initialized: true,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
// Edge case: no files at all (repo exists but empty)
|
||||
if (total === 0) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "list_memory_response",
|
||||
request_id: parsed.request_id,
|
||||
entries: [],
|
||||
done: true,
|
||||
total: 0,
|
||||
success: true,
|
||||
memfs_initialized: true,
|
||||
}),
|
||||
);
|
||||
}
|
||||
} catch (err) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "list_memory_response",
|
||||
request_id: parsed.request_id,
|
||||
entries: [],
|
||||
done: true,
|
||||
total: 0,
|
||||
success: false,
|
||||
error:
|
||||
err instanceof Error ? err.message : "Failed to list memory",
|
||||
}),
|
||||
);
|
||||
}
|
||||
})();
|
||||
return;
|
||||
}
|
||||
|
||||
// ── Enable memfs command ────────────────────────────────────────────
|
||||
if (isEnableMemfsCommand(parsed)) {
|
||||
void (async () => {
|
||||
try {
|
||||
const { applyMemfsFlags } = await import(
|
||||
"../../agent/memoryFilesystem"
|
||||
);
|
||||
const result = await applyMemfsFlags(parsed.agent_id, true, false);
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "enable_memfs_response",
|
||||
request_id: parsed.request_id,
|
||||
success: true,
|
||||
memory_directory: result.memoryDir,
|
||||
}),
|
||||
);
|
||||
// Push memory_updated so the UI auto-refreshes its file list
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "memory_updated",
|
||||
affected_paths: ["*"],
|
||||
timestamp: Date.now(),
|
||||
}),
|
||||
);
|
||||
} catch (err) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: "enable_memfs_response",
|
||||
request_id: parsed.request_id,
|
||||
success: false,
|
||||
error:
|
||||
err instanceof Error ? err.message : "Failed to enable memfs",
|
||||
}),
|
||||
);
|
||||
}
|
||||
})();
|
||||
return;
|
||||
}
|
||||
|
||||
// ── Terminal commands (no runtime scope required) ──────────────────
|
||||
if (parsed.type === "terminal_spawn") {
|
||||
handleTerminalSpawn(parsed, socket, runtime.bootWorkingDirectory);
|
||||
handleTerminalSpawn(
|
||||
parsed,
|
||||
socket,
|
||||
parsed.cwd ?? runtime.bootWorkingDirectory,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@@ -471,7 +471,11 @@ export function consumeInterruptQueue(
|
||||
agentId: string,
|
||||
conversationId: string,
|
||||
): {
|
||||
approvalMessage: { type: "approval"; approvals: ApprovalResult[] };
|
||||
approvalMessage: {
|
||||
type: "approval";
|
||||
approvals: ApprovalResult[];
|
||||
otid?: string;
|
||||
};
|
||||
interruptedToolCallIds: string[];
|
||||
} | null {
|
||||
if (
|
||||
@@ -483,7 +487,11 @@ export function consumeInterruptQueue(
|
||||
|
||||
const ctx = runtime.pendingInterruptedContext;
|
||||
let result: {
|
||||
approvalMessage: { type: "approval"; approvals: ApprovalResult[] };
|
||||
approvalMessage: {
|
||||
type: "approval";
|
||||
approvals: ApprovalResult[];
|
||||
otid?: string;
|
||||
};
|
||||
interruptedToolCallIds: string[];
|
||||
} | null = null;
|
||||
|
||||
@@ -497,6 +505,7 @@ export function consumeInterruptQueue(
|
||||
approvalMessage: {
|
||||
type: "approval",
|
||||
approvals: runtime.pendingInterruptedResults,
|
||||
otid: crypto.randomUUID(),
|
||||
},
|
||||
interruptedToolCallIds: runtime.pendingInterruptedToolCallIds
|
||||
? [...runtime.pendingInterruptedToolCallIds]
|
||||
|
||||
@@ -31,11 +31,35 @@ export function getPermissionModeScopeKey(
|
||||
return `conversation:${normalizedConversationId}`;
|
||||
}
|
||||
|
||||
function createDefaultPermissionModeState(): ConversationPermissionModeState {
|
||||
return {
|
||||
mode: globalPermissionMode.getMode(),
|
||||
planFilePath: null,
|
||||
modeBeforePlan: null,
|
||||
};
|
||||
}
|
||||
|
||||
function isPrunableDefaultState(
|
||||
state: ConversationPermissionModeState,
|
||||
): boolean {
|
||||
return (
|
||||
state.mode === globalPermissionMode.getMode() &&
|
||||
state.planFilePath === null &&
|
||||
state.modeBeforePlan === null
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read-only state lookup for a conversation scope.
|
||||
*
|
||||
* This helper is intended for read paths (status rendering, serialization).
|
||||
* It does not materialize new map entries for missing scopes.
|
||||
*/
|
||||
export function getConversationPermissionModeState(
|
||||
runtime: ListenerRuntime,
|
||||
agentId?: string | null,
|
||||
conversationId?: string | null,
|
||||
): ConversationPermissionModeState {
|
||||
): Readonly<ConversationPermissionModeState> {
|
||||
const scopeKey = getPermissionModeScopeKey(agentId, conversationId);
|
||||
const normalizedConversationId = normalizeConversationId(conversationId);
|
||||
|
||||
@@ -52,42 +76,78 @@ export function getConversationPermissionModeState(
|
||||
const legacyDefault =
|
||||
runtime.permissionModeByConversation.get(legacyDefaultKey);
|
||||
if (legacyDefault) {
|
||||
if (normalizeCwdAgentId(agentId)) {
|
||||
runtime.permissionModeByConversation.set(scopeKey, {
|
||||
...legacyDefault,
|
||||
});
|
||||
const normalizedAgentId = normalizeCwdAgentId(agentId);
|
||||
if (normalizedAgentId) {
|
||||
runtime.permissionModeByConversation.set(scopeKey, legacyDefault);
|
||||
runtime.permissionModeByConversation.delete(legacyDefaultKey);
|
||||
}
|
||||
return legacyDefault;
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
mode: globalPermissionMode.getMode(),
|
||||
planFilePath: null,
|
||||
modeBeforePlan: null,
|
||||
};
|
||||
return createDefaultPermissionModeState();
|
||||
}
|
||||
|
||||
export function setConversationPermissionModeState(
|
||||
/**
|
||||
* Returns the canonical mutable state object for a conversation scope.
|
||||
*
|
||||
* This helper materializes missing entries and guarantees stable identity
|
||||
* during a turn so concurrent mode updates (websocket + tool mutations)
|
||||
* apply to the same object reference.
|
||||
*/
|
||||
export function getOrCreateConversationPermissionModeStateRef(
|
||||
runtime: ListenerRuntime,
|
||||
agentId: string | null,
|
||||
conversationId: string,
|
||||
state: ConversationPermissionModeState,
|
||||
): void {
|
||||
agentId?: string | null,
|
||||
conversationId?: string | null,
|
||||
): ConversationPermissionModeState {
|
||||
const scopeKey = getPermissionModeScopeKey(agentId, conversationId);
|
||||
// Only store if different from the global default to keep the map lean.
|
||||
if (
|
||||
state.mode === globalPermissionMode.getMode() &&
|
||||
state.planFilePath === null &&
|
||||
state.modeBeforePlan === null
|
||||
) {
|
||||
runtime.permissionModeByConversation.delete(scopeKey);
|
||||
} else {
|
||||
runtime.permissionModeByConversation.set(scopeKey, { ...state });
|
||||
const normalizedConversationId = normalizeConversationId(conversationId);
|
||||
|
||||
const direct = runtime.permissionModeByConversation.get(scopeKey);
|
||||
if (direct) {
|
||||
return direct;
|
||||
}
|
||||
|
||||
persistPermissionModeMap(runtime.permissionModeByConversation);
|
||||
if (normalizedConversationId === "default") {
|
||||
const legacyDefaultKey = getPermissionModeScopeKey(null, "default");
|
||||
const legacyDefault =
|
||||
runtime.permissionModeByConversation.get(legacyDefaultKey);
|
||||
if (legacyDefault) {
|
||||
const normalizedAgentId = normalizeCwdAgentId(agentId);
|
||||
if (normalizedAgentId) {
|
||||
runtime.permissionModeByConversation.set(scopeKey, legacyDefault);
|
||||
runtime.permissionModeByConversation.delete(legacyDefaultKey);
|
||||
}
|
||||
return legacyDefault;
|
||||
}
|
||||
}
|
||||
|
||||
const created = createDefaultPermissionModeState();
|
||||
runtime.permissionModeByConversation.set(scopeKey, created);
|
||||
return created;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a canonical state entry when it is equivalent to the default state.
|
||||
*
|
||||
* This should be called at turn finalization boundaries, not on each mode
|
||||
* update, to avoid breaking object identity for in-flight turns.
|
||||
*/
|
||||
export function pruneConversationPermissionModeStateIfDefault(
|
||||
runtime: ListenerRuntime,
|
||||
agentId?: string | null,
|
||||
conversationId?: string | null,
|
||||
): boolean {
|
||||
const scopeKey = getPermissionModeScopeKey(agentId, conversationId);
|
||||
const state = runtime.permissionModeByConversation.get(scopeKey);
|
||||
if (!state) {
|
||||
return false;
|
||||
}
|
||||
if (!isPrunableDefaultState(state)) {
|
||||
return false;
|
||||
}
|
||||
runtime.permissionModeByConversation.delete(scopeKey);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -124,6 +184,15 @@ export function loadPersistedPermissionModeMap(): Map<
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Persist permission mode map to remote-settings.json.
|
||||
*/
|
||||
export function persistPermissionModeMapForRuntime(
|
||||
runtime: ListenerRuntime,
|
||||
): void {
|
||||
persistPermissionModeMap(runtime.permissionModeByConversation);
|
||||
}
|
||||
|
||||
/**
|
||||
* Serialize the permission mode map and persist to remote-settings.json.
|
||||
* Strips planFilePath (ephemeral). Converts "plan" mode to modeBeforePlan.
|
||||
|
||||
@@ -2,7 +2,11 @@ import type WebSocket from "ws";
|
||||
import type {
|
||||
AbortMessageCommand,
|
||||
ChangeDeviceStateCommand,
|
||||
EnableMemfsCommand,
|
||||
InputCommand,
|
||||
ListInDirectoryCommand,
|
||||
ListMemoryCommand,
|
||||
ReadFileCommand,
|
||||
RuntimeScope,
|
||||
SearchFilesCommand,
|
||||
SyncCommand,
|
||||
@@ -253,6 +257,56 @@ export function isSearchFilesCommand(
|
||||
);
|
||||
}
|
||||
|
||||
export function isListInDirectoryCommand(
|
||||
value: unknown,
|
||||
): value is ListInDirectoryCommand {
|
||||
if (!value || typeof value !== "object") return false;
|
||||
const c = value as { type?: unknown; path?: unknown };
|
||||
return c.type === "list_in_directory" && typeof c.path === "string";
|
||||
}
|
||||
|
||||
export function isReadFileCommand(value: unknown): value is ReadFileCommand {
|
||||
if (!value || typeof value !== "object") return false;
|
||||
const c = value as { type?: unknown; path?: unknown; request_id?: unknown };
|
||||
return (
|
||||
c.type === "read_file" &&
|
||||
typeof c.path === "string" &&
|
||||
typeof c.request_id === "string"
|
||||
);
|
||||
}
|
||||
|
||||
export function isListMemoryCommand(
|
||||
value: unknown,
|
||||
): value is ListMemoryCommand {
|
||||
if (!value || typeof value !== "object") return false;
|
||||
const c = value as {
|
||||
type?: unknown;
|
||||
request_id?: unknown;
|
||||
agent_id?: unknown;
|
||||
};
|
||||
return (
|
||||
c.type === "list_memory" &&
|
||||
typeof c.request_id === "string" &&
|
||||
typeof c.agent_id === "string"
|
||||
);
|
||||
}
|
||||
|
||||
export function isEnableMemfsCommand(
|
||||
value: unknown,
|
||||
): value is EnableMemfsCommand {
|
||||
if (!value || typeof value !== "object") return false;
|
||||
const c = value as {
|
||||
type?: unknown;
|
||||
request_id?: unknown;
|
||||
agent_id?: unknown;
|
||||
};
|
||||
return (
|
||||
c.type === "enable_memfs" &&
|
||||
typeof c.request_id === "string" &&
|
||||
typeof c.agent_id === "string"
|
||||
);
|
||||
}
|
||||
|
||||
export function parseServerMessage(
|
||||
data: WebSocket.RawData,
|
||||
): ParsedServerMessage | null {
|
||||
@@ -268,7 +322,11 @@ export function parseServerMessage(
|
||||
isTerminalInputCommand(parsed) ||
|
||||
isTerminalResizeCommand(parsed) ||
|
||||
isTerminalKillCommand(parsed) ||
|
||||
isSearchFilesCommand(parsed)
|
||||
isSearchFilesCommand(parsed) ||
|
||||
isListInDirectoryCommand(parsed) ||
|
||||
isReadFileCommand(parsed) ||
|
||||
isListMemoryCommand(parsed) ||
|
||||
isEnableMemfsCommand(parsed)
|
||||
) {
|
||||
return parsed as WsProtocolCommand;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { MessageCreate } from "@letta-ai/letta-client/resources/agents/agents";
|
||||
import WebSocket from "ws";
|
||||
import { getMemoryFilesystemRoot } from "../../agent/memoryFilesystem";
|
||||
import { permissionMode } from "../../permissions/mode";
|
||||
import type { DequeuedBatch } from "../../queue/queueRuntime";
|
||||
import { settingsManager } from "../../settings-manager";
|
||||
@@ -101,6 +102,7 @@ export function buildDeviceStatus(
|
||||
current_available_skills: [],
|
||||
background_processes: [],
|
||||
pending_control_requests: [],
|
||||
memory_directory: null,
|
||||
};
|
||||
}
|
||||
const scope = getScopeForRuntime(runtime, params);
|
||||
@@ -145,6 +147,9 @@ export function buildDeviceStatus(
|
||||
current_available_skills: [],
|
||||
background_processes: [],
|
||||
pending_control_requests: getPendingControlRequests(listener, scope),
|
||||
memory_directory: scopedAgentId
|
||||
? getMemoryFilesystemRoot(scopedAgentId)
|
||||
: null,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ import {
|
||||
emitToolExecutionFinishedEvents,
|
||||
emitToolExecutionStartedEvents,
|
||||
} from "./interrupts";
|
||||
import { getConversationPermissionModeState } from "./permissionMode";
|
||||
import { getOrCreateConversationPermissionModeStateRef } from "./permissionMode";
|
||||
import {
|
||||
emitDequeuedUserMessage,
|
||||
emitRetryDelta,
|
||||
@@ -55,6 +55,7 @@ import {
|
||||
getApprovalContinuationRecoveryDisposition,
|
||||
isApprovalToolCallDesyncError,
|
||||
} from "./recovery";
|
||||
import { injectQueuedSkillContent } from "./skill-injection";
|
||||
import type { ConversationRuntime } from "./types";
|
||||
|
||||
export function isApprovalOnlyInput(
|
||||
@@ -157,7 +158,7 @@ export async function resolveStaleApprovals(
|
||||
requireArgsForAutoApprove: true,
|
||||
missingNameReason: "Tool call incomplete - missing name",
|
||||
workingDirectory: recoveryWorkingDirectory,
|
||||
permissionModeState: getConversationPermissionModeState(
|
||||
permissionModeState: getOrCreateConversationPermissionModeStateRef(
|
||||
runtime.listener,
|
||||
runtime.agentId,
|
||||
runtime.conversationId,
|
||||
@@ -290,6 +291,7 @@ export async function resolveStaleApprovals(
|
||||
{
|
||||
type: "approval",
|
||||
approvals: approvalResults,
|
||||
otid: crypto.randomUUID(),
|
||||
},
|
||||
];
|
||||
const consumedQueuedTurn = consumeQueuedTurn(runtime);
|
||||
@@ -299,9 +301,11 @@ export async function resolveStaleApprovals(
|
||||
emitDequeuedUserMessage(socket, runtime, queuedTurn, dequeuedBatch);
|
||||
}
|
||||
|
||||
const continuationMessagesWithSkillContent =
|
||||
injectQueuedSkillContent(continuationMessages);
|
||||
const recoveryStream = await sendApprovalContinuationWithRetry(
|
||||
recoveryConversationId,
|
||||
continuationMessages,
|
||||
continuationMessagesWithSkillContent,
|
||||
{
|
||||
agentId: runtime.agentId ?? undefined,
|
||||
streamTokens: true,
|
||||
|
||||
30
src/websocket/listener/skill-injection.ts
Normal file
30
src/websocket/listener/skill-injection.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import type { MessageCreate } from "@letta-ai/letta-client/resources/agents/agents";
|
||||
import type { ApprovalCreate } from "@letta-ai/letta-client/resources/agents/messages";
|
||||
import { consumeQueuedSkillContent } from "../../tools/impl/skillContentRegistry";
|
||||
|
||||
/**
|
||||
* Append queued Skill tool content as a trailing user message.
|
||||
*
|
||||
* Ordering is preserved: existing messages stay in place and skill content,
|
||||
* when present, is appended at the end.
|
||||
*/
|
||||
export function injectQueuedSkillContent(
|
||||
messages: Array<MessageCreate | ApprovalCreate>,
|
||||
): Array<MessageCreate | ApprovalCreate> {
|
||||
const skillContents = consumeQueuedSkillContent();
|
||||
if (skillContents.length === 0) {
|
||||
return messages;
|
||||
}
|
||||
|
||||
return [
|
||||
...messages,
|
||||
{
|
||||
role: "user",
|
||||
otid: crypto.randomUUID(),
|
||||
content: skillContents.map((sc) => ({
|
||||
type: "text" as const,
|
||||
text: sc.content,
|
||||
})),
|
||||
},
|
||||
];
|
||||
}
|
||||
@@ -42,6 +42,7 @@ import {
|
||||
markAwaitingAcceptedApprovalContinuationRunId,
|
||||
sendApprovalContinuationWithRetry,
|
||||
} from "./send";
|
||||
import { injectQueuedSkillContent } from "./skill-injection";
|
||||
import type { ConversationRuntime } from "./types";
|
||||
|
||||
type Decision =
|
||||
@@ -332,13 +333,15 @@ export async function handleApprovalStop(params: {
|
||||
emitDequeuedUserMessage(socket, runtime, queuedTurn, dequeuedBatch);
|
||||
}
|
||||
|
||||
const nextInputWithSkillContent = injectQueuedSkillContent(nextInput);
|
||||
|
||||
setLoopStatus(runtime, "SENDING_API_REQUEST", {
|
||||
agent_id: agentId,
|
||||
conversation_id: conversationId,
|
||||
});
|
||||
const stream = await sendApprovalContinuationWithRetry(
|
||||
conversationId,
|
||||
nextInput,
|
||||
nextInputWithSkillContent,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
@@ -348,7 +351,7 @@ export async function handleApprovalStop(params: {
|
||||
return {
|
||||
terminated: true,
|
||||
stream: null,
|
||||
currentInput: nextInput,
|
||||
currentInput: nextInputWithSkillContent,
|
||||
dequeuedBatchId: continuationBatchId,
|
||||
pendingNormalizationInterruptedToolCallIds: [],
|
||||
turnToolContextId,
|
||||
@@ -392,7 +395,7 @@ export async function handleApprovalStop(params: {
|
||||
return {
|
||||
terminated: false,
|
||||
stream,
|
||||
currentInput: nextInput,
|
||||
currentInput: nextInputWithSkillContent,
|
||||
dequeuedBatchId: continuationBatchId,
|
||||
pendingNormalizationInterruptedToolCallIds: [],
|
||||
turnToolContextId: null,
|
||||
|
||||
@@ -44,8 +44,9 @@ import {
|
||||
populateInterruptQueue,
|
||||
} from "./interrupts";
|
||||
import {
|
||||
getConversationPermissionModeState,
|
||||
setConversationPermissionModeState,
|
||||
getOrCreateConversationPermissionModeStateRef,
|
||||
persistPermissionModeMapForRuntime,
|
||||
pruneConversationPermissionModeStateIfDefault,
|
||||
} from "./permissionMode";
|
||||
import {
|
||||
emitCanonicalMessageDelta,
|
||||
@@ -74,6 +75,7 @@ import {
|
||||
sendApprovalContinuationWithRetry,
|
||||
sendMessageStreamWithRetry,
|
||||
} from "./send";
|
||||
import { injectQueuedSkillContent } from "./skill-injection";
|
||||
import { handleApprovalStop } from "./turn-approval";
|
||||
import type { ConversationRuntime, IncomingMessage } from "./types";
|
||||
|
||||
@@ -98,16 +100,14 @@ export async function handleIncomingMessage(
|
||||
conversationId,
|
||||
);
|
||||
|
||||
// Build a mutable permission mode state object for this turn, seeded from the
|
||||
// persistent ListenerRuntime map. Tool implementations (EnterPlanMode, ExitPlanMode)
|
||||
// mutate it in place; we sync the final value back to the map after the turn.
|
||||
const turnPermissionModeState = {
|
||||
...getConversationPermissionModeState(
|
||||
runtime.listener,
|
||||
normalizedAgentId,
|
||||
conversationId,
|
||||
),
|
||||
};
|
||||
// Get the canonical mutable permission mode state ref for this turn.
|
||||
// Websocket mode changes and tool implementations (EnterPlanMode/ExitPlanMode)
|
||||
// all mutate this same object in place.
|
||||
const turnPermissionModeState = getOrCreateConversationPermissionModeStateRef(
|
||||
runtime.listener,
|
||||
normalizedAgentId,
|
||||
conversationId,
|
||||
);
|
||||
|
||||
const msgRunIds: string[] = [];
|
||||
let postStopApprovalRecoveryRetries = 0;
|
||||
@@ -183,7 +183,11 @@ export async function handleIncomingMessage(
|
||||
queuedInterruptedToolCallIds = consumed.interruptedToolCallIds;
|
||||
}
|
||||
|
||||
messagesToSend.push(...normalizedMessages);
|
||||
messagesToSend.push(
|
||||
...normalizedMessages.map((m) =>
|
||||
"content" in m && !m.otid ? { ...m, otid: crypto.randomUUID() } : m,
|
||||
),
|
||||
);
|
||||
|
||||
const firstMessage = normalizedMessages[0];
|
||||
const isApprovalMessage =
|
||||
@@ -232,11 +236,12 @@ export async function handleIncomingMessage(
|
||||
});
|
||||
|
||||
const isPureApprovalContinuation = isApprovalOnlyInput(currentInput);
|
||||
const currentInputWithSkillContent = injectQueuedSkillContent(currentInput);
|
||||
|
||||
let stream = isPureApprovalContinuation
|
||||
? await sendApprovalContinuationWithRetry(
|
||||
conversationId,
|
||||
currentInput,
|
||||
currentInputWithSkillContent,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
@@ -244,12 +249,13 @@ export async function handleIncomingMessage(
|
||||
)
|
||||
: await sendMessageStreamWithRetry(
|
||||
conversationId,
|
||||
currentInput,
|
||||
currentInputWithSkillContent,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
);
|
||||
currentInput = currentInputWithSkillContent;
|
||||
if (!stream) {
|
||||
return;
|
||||
}
|
||||
@@ -417,27 +423,28 @@ export async function handleIncomingMessage(
|
||||
agent_id: agentId,
|
||||
conversation_id: conversationId,
|
||||
});
|
||||
stream =
|
||||
currentInput.length === 1 &&
|
||||
currentInput[0] !== undefined &&
|
||||
"type" in currentInput[0] &&
|
||||
currentInput[0].type === "approval"
|
||||
? await sendApprovalContinuationWithRetry(
|
||||
conversationId,
|
||||
currentInput,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
)
|
||||
: await sendMessageStreamWithRetry(
|
||||
conversationId,
|
||||
currentInput,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
);
|
||||
const isPureApprovalContinuationRetry =
|
||||
isApprovalOnlyInput(currentInput);
|
||||
const retryInputWithSkillContent =
|
||||
injectQueuedSkillContent(currentInput);
|
||||
stream = isPureApprovalContinuationRetry
|
||||
? await sendApprovalContinuationWithRetry(
|
||||
conversationId,
|
||||
retryInputWithSkillContent,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
)
|
||||
: await sendMessageStreamWithRetry(
|
||||
conversationId,
|
||||
retryInputWithSkillContent,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
);
|
||||
currentInput = retryInputWithSkillContent;
|
||||
if (!stream) {
|
||||
return;
|
||||
}
|
||||
@@ -500,27 +507,28 @@ export async function handleIncomingMessage(
|
||||
agent_id: agentId,
|
||||
conversation_id: conversationId,
|
||||
});
|
||||
stream =
|
||||
currentInput.length === 1 &&
|
||||
currentInput[0] !== undefined &&
|
||||
"type" in currentInput[0] &&
|
||||
currentInput[0].type === "approval"
|
||||
? await sendApprovalContinuationWithRetry(
|
||||
conversationId,
|
||||
currentInput,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
)
|
||||
: await sendMessageStreamWithRetry(
|
||||
conversationId,
|
||||
currentInput,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
);
|
||||
const isPureApprovalContinuationRetry =
|
||||
isApprovalOnlyInput(currentInput);
|
||||
const retryInputWithSkillContent =
|
||||
injectQueuedSkillContent(currentInput);
|
||||
stream = isPureApprovalContinuationRetry
|
||||
? await sendApprovalContinuationWithRetry(
|
||||
conversationId,
|
||||
retryInputWithSkillContent,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
)
|
||||
: await sendMessageStreamWithRetry(
|
||||
conversationId,
|
||||
retryInputWithSkillContent,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
);
|
||||
currentInput = retryInputWithSkillContent;
|
||||
if (!stream) {
|
||||
return;
|
||||
}
|
||||
@@ -571,27 +579,28 @@ export async function handleIncomingMessage(
|
||||
agent_id: agentId,
|
||||
conversation_id: conversationId,
|
||||
});
|
||||
stream =
|
||||
currentInput.length === 1 &&
|
||||
currentInput[0] !== undefined &&
|
||||
"type" in currentInput[0] &&
|
||||
currentInput[0].type === "approval"
|
||||
? await sendApprovalContinuationWithRetry(
|
||||
conversationId,
|
||||
currentInput,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
)
|
||||
: await sendMessageStreamWithRetry(
|
||||
conversationId,
|
||||
currentInput,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
);
|
||||
const isPureApprovalContinuationRetry =
|
||||
isApprovalOnlyInput(currentInput);
|
||||
const retryInputWithSkillContent =
|
||||
injectQueuedSkillContent(currentInput);
|
||||
stream = isPureApprovalContinuationRetry
|
||||
? await sendApprovalContinuationWithRetry(
|
||||
conversationId,
|
||||
retryInputWithSkillContent,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
)
|
||||
: await sendMessageStreamWithRetry(
|
||||
conversationId,
|
||||
retryInputWithSkillContent,
|
||||
buildSendOptions(),
|
||||
socket,
|
||||
runtime,
|
||||
runtime.activeAbortController.signal,
|
||||
);
|
||||
currentInput = retryInputWithSkillContent;
|
||||
if (!stream) {
|
||||
return;
|
||||
}
|
||||
@@ -768,22 +777,17 @@ export async function handleIncomingMessage(
|
||||
console.error("[Listen] Error handling message:", error);
|
||||
}
|
||||
} finally {
|
||||
// Sync any permission mode changes made by tools (EnterPlanMode/ExitPlanMode)
|
||||
// back to the persistent ListenerRuntime map so the state survives eviction.
|
||||
setConversationPermissionModeState(
|
||||
// Prune lean defaults only at turn-finalization boundaries (never during
|
||||
// mid-turn mode changes), then persist the canonical map.
|
||||
pruneConversationPermissionModeStateIfDefault(
|
||||
runtime.listener,
|
||||
normalizedAgentId,
|
||||
conversationId,
|
||||
turnPermissionModeState,
|
||||
);
|
||||
persistPermissionModeMapForRuntime(runtime.listener);
|
||||
|
||||
// Emit a corrected device status now that the permission mode is synced.
|
||||
// The emitRuntimeStateUpdates() calls earlier in the turn read from the map
|
||||
// before setConversationPermissionModeState() ran, so they emitted a stale
|
||||
// current_permission_mode. This final emission sends the correct value,
|
||||
// ensuring the web UI (and desktop) always reflect mode changes from
|
||||
// EnterPlanMode/ExitPlanMode and that mid-turn web permission changes
|
||||
// are not reverted by a stale emission at turn end.
|
||||
// Emit device status after persistence/pruning so UI reflects the final
|
||||
// canonical state for this scope.
|
||||
emitDeviceStatusIfOpen(runtime, {
|
||||
agent_id: agentId || null,
|
||||
conversation_id: conversationId,
|
||||
|
||||
@@ -27,6 +27,31 @@ interface TerminalSession {
|
||||
spawnedAt: number;
|
||||
}
|
||||
|
||||
type NodePtyExitEvent = { exitCode?: number; signal?: number };
|
||||
|
||||
type NodePtyProcess = {
|
||||
pid: number;
|
||||
write: (data: string) => void;
|
||||
resize: (cols: number, rows: number) => void;
|
||||
kill: () => void;
|
||||
onData: (listener: (data: string) => void) => void;
|
||||
onExit: (listener: (event: NodePtyExitEvent) => void) => void;
|
||||
};
|
||||
|
||||
type NodePtyModule = {
|
||||
spawn: (
|
||||
file: string,
|
||||
args: string[],
|
||||
options: {
|
||||
name: string;
|
||||
cols: number;
|
||||
rows: number;
|
||||
cwd: string;
|
||||
env: Record<string, string>;
|
||||
},
|
||||
) => NodePtyProcess;
|
||||
};
|
||||
|
||||
const terminals = new Map<string, TerminalSession>();
|
||||
|
||||
function getDefaultShell(): string {
|
||||
@@ -160,7 +185,7 @@ function spawnNodePty(
|
||||
socket: WebSocket,
|
||||
): TerminalSession {
|
||||
// eslint-disable-next-line @typescript-eslint/no-require-imports
|
||||
const pty = require("node-pty") as typeof import("node-pty");
|
||||
const pty = require("node-pty") as NodePtyModule;
|
||||
|
||||
const handleData = makeOutputBatcher((data) =>
|
||||
sendTerminalMessage(socket, { type: "terminal_output", terminal_id, data }),
|
||||
@@ -180,7 +205,7 @@ function spawnNodePty(
|
||||
|
||||
ptyProcess.onData(handleData);
|
||||
|
||||
ptyProcess.onExit(({ exitCode }) => {
|
||||
ptyProcess.onExit(({ exitCode }: NodePtyExitEvent) => {
|
||||
const current = terminals.get(terminal_id);
|
||||
if (current && current.pid === ptyProcess.pid) {
|
||||
terminals.delete(terminal_id);
|
||||
|
||||
Reference in New Issue
Block a user