From 7af73fe53e38b823d571aaf6b095e0d4c4de38cd Mon Sep 17 00:00:00 2001 From: Kian Jones <11655409+kianjones9@users.noreply.github.com> Date: Fri, 23 Jan 2026 17:19:58 -0800 Subject: [PATCH] feat: retry messages on premature interrupt (#593) Co-authored-by: Caren Thomas Co-authored-by: Letta --- src/agent/promptAssets.ts | 2 + .../prompts/interrupt_recovery_alert.txt | 1 + src/cli/App.tsx | 42 ++++++++++++++++++- 3 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 src/agent/prompts/interrupt_recovery_alert.txt diff --git a/src/agent/promptAssets.ts b/src/agent/promptAssets.ts index 10b2731..976b1ff 100644 --- a/src/agent/promptAssets.ts +++ b/src/agent/promptAssets.ts @@ -5,6 +5,7 @@ import anthropicPrompt from "./prompts/claude.md"; import codexPrompt from "./prompts/codex.md"; import geminiPrompt from "./prompts/gemini.md"; import humanPrompt from "./prompts/human.mdx"; +import interruptRecoveryAlert from "./prompts/interrupt_recovery_alert.txt"; // init_memory.md is now a bundled skill at src/skills/builtin/init/SKILL.md import lettaAnthropicPrompt from "./prompts/letta_claude.md"; import lettaCodexPrompt from "./prompts/letta_codex.md"; @@ -31,6 +32,7 @@ export const SKILL_CREATOR_PROMPT = skillCreatorModePrompt; export const REMEMBER_PROMPT = rememberPrompt; export const MEMORY_CHECK_REMINDER = memoryCheckReminder; export const APPROVAL_RECOVERY_PROMPT = approvalRecoveryAlert; +export const INTERRUPT_RECOVERY_ALERT = interruptRecoveryAlert; export const MEMORY_PROMPTS: Record = { "persona.mdx": personaPrompt, diff --git a/src/agent/prompts/interrupt_recovery_alert.txt b/src/agent/prompts/interrupt_recovery_alert.txt new file mode 100644 index 0000000..4dbae8e --- /dev/null +++ b/src/agent/prompts/interrupt_recovery_alert.txt @@ -0,0 +1 @@ +The user interrupted the active stream. 
diff --git a/src/cli/App.tsx b/src/cli/App.tsx index 4e9ad6b..6166fe8 100644 --- a/src/cli/App.tsx +++ b/src/cli/App.tsx @@ -44,6 +44,7 @@ import { type AgentProvenance, createAgent } from "../agent/create"; import { ISOLATED_BLOCK_LABELS } from "../agent/memory"; import { sendMessageStream } from "../agent/message"; import { getModelInfo, getModelShortName } from "../agent/model"; +import { INTERRUPT_RECOVERY_ALERT } from "../agent/promptAssets"; import { SessionStats } from "../agent/stats"; import { INTERRUPTED_BY_USER, @@ -1173,6 +1174,11 @@ export default function App({ const queueAppendTimeoutRef = useRef(null); // 15s append mode timeout + // Cache last sent input - cleared on successful completion, remains if interrupted + const lastSentInputRef = useRef | null>( + null, + ); + // Epoch counter to force dequeue effect re-run when refs change but state doesn't // Incremented when userCancelledRef is reset while messages are queued const [dequeueEpoch, setDequeueEpoch] = useState(0); @@ -1904,7 +1910,7 @@ export default function App({ }; // Copy so we can safely mutate for retry recovery flows - const currentInput = [...initialInput]; + let currentInput = [...initialInput]; const allowReentry = options?.allowReentry ?? false; // Use provided generation (from onSubmit) or capture current @@ -1951,6 +1957,38 @@ export default function App({ setStreaming(true); abortControllerRef.current = new AbortController(); + // Recover interrupted message: if cache contains ONLY user messages, prepend them + // Note: type="message" is a local discriminator (not in SDK types) to distinguish from approvals + const originalInput = currentInput; + const cacheIsAllUserMsgs = lastSentInputRef.current?.every( + (m) => m.type === "message" && m.role === "user", + ); + if (cacheIsAllUserMsgs && lastSentInputRef.current) { + currentInput = [ + ...lastSentInputRef.current, + ...currentInput.map((m) => + m.type === "message" && m.role === "user" + ? 
{ + ...m, + content: [ + { type: "text" as const, text: INTERRUPT_RECOVERY_ALERT }, + ...(typeof m.content === "string" + ? [{ type: "text" as const, text: m.content }] + : m.content), + ], + } + : m, + ), + ]; + // Cache old + new for chained recovery + lastSentInputRef.current = [ + ...lastSentInputRef.current, + ...originalInput, + ]; + } else { + lastSentInputRef.current = originalInput; + } + // Clear any stale pending tool calls from previous turns // If we're sending a new message, old pending state is no longer relevant // Pass false to avoid setting interrupted=true, which causes race conditions @@ -2358,6 +2396,7 @@ export default function App({ llmApiErrorRetriesRef.current = 0; // Reset retry counter on success conversationBusyRetriesRef.current = 0; lastDequeuedMessageRef.current = null; // Clear - message was processed successfully + lastSentInputRef.current = null; // Clear - no recovery needed // Run Stop hooks - if blocked/errored, continue the conversation with feedback const stopHookResult = await runStopHooks( @@ -2498,6 +2537,7 @@ export default function App({ // Clear stale state immediately to prevent ID mismatch bugs setAutoHandledResults([]); setAutoDeniedApprovals([]); + lastSentInputRef.current = null; // Clear - message was received by server // Use new approvals array, fallback to legacy approval for backward compat const approvalsToProcess =