feat: add memory log hook (#688)

This commit is contained in:
jnjpng
2026-01-26 15:54:12 -08:00
committed by GitHub
parent b281396b18
commit b884f84631
8 changed files with 761 additions and 5 deletions

View File

@@ -1493,6 +1493,11 @@ export default function App({
buffersRef.current.tokenStreamingEnabled = tokenStreamingEnabled;
}, [tokenStreamingEnabled]);
// Keep buffers in sync with agentId for server-side tool hooks
useEffect(() => {
buffersRef.current.agentId = agentState?.id;
}, [agentState?.id]);
// Cache precomputed diffs from approval dialogs for tool return rendering
// Key: toolCallId or "toolCallId:filePath" for Patch operations
const precomputedDiffsRef = useRef<Map<string, AdvancedDiffSuccess>>(

View File

@@ -6,6 +6,7 @@
import type { LettaStreamingResponse } from "@letta-ai/letta-client/resources/agents/messages";
import { INTERRUPTED_BY_USER } from "../../constants";
import { runPostToolUseHooks, runPreToolUseHooks } from "../../hooks";
import { findLastSafeSplitPoint } from "./markdownSplit";
import { isShellTool } from "./toolNameMapping";
@@ -157,6 +158,17 @@ export type Line =
}
| { kind: "separator"; id: string };
/**
 * Tracks server-side tool calls for hook triggering.
 * Server-side tools (tool_call_message) are executed by the Letta server,
 * not the client, so we need to trigger hooks when we receive the stream messages.
 */
export interface ServerToolCallInfo {
  // Tool name as seen in the stream; may be empty until a chunk carrying the
  // name arrives (chunks can deliver the name and the arguments separately).
  toolName: string;
  // Raw JSON argument text, accumulated by concatenating streamed chunks;
  // may be incomplete/unparseable JSON until streaming for this call finishes.
  toolArgs: string;
  // Set to true when runPreToolUseHooks has been fired for this call,
  // guaranteeing the PreToolUse hook runs at most once per toolCallId.
  preToolUseTriggered: boolean;
}
// Top-level state object for all streaming events
export type Buffers = {
tokenCount: number;
@@ -180,9 +192,13 @@ export type Buffers = {
// Aggressive static promotion: split streaming content at paragraph boundaries
tokenStreamingEnabled?: boolean;
splitCounters: Map<string, number>; // tracks split count per original otid
// Track server-side tool calls for hook triggering (toolCallId -> info)
serverToolCalls: Map<string, ServerToolCallInfo>;
// Agent ID for passing to hooks (needed for server-side tools like memory)
agentId?: string;
};
export function createBuffers(): Buffers {
export function createBuffers(agentId?: string): Buffers {
return {
tokenCount: 0,
order: [],
@@ -202,6 +218,8 @@ export function createBuffers(): Buffers {
},
tokenStreamingEnabled: false,
splitCounters: new Map(),
serverToolCalls: new Map(),
agentId,
};
}
@@ -591,6 +609,40 @@ export function onChunk(b: Buffers, chunk: LettaStreamingResponse) {
// Count tool call arguments as LLM output tokens
b.tokenCount += argsText.length;
}
// Track server-side tools and trigger PreToolUse hook (fire-and-forget since execution already started)
if (chunk.message_type === "tool_call_message" && toolCallId) {
const existing = b.serverToolCalls.get(toolCallId);
const toolInfo: ServerToolCallInfo = existing || {
toolName: "",
toolArgs: "",
preToolUseTriggered: false,
};
if (name) toolInfo.toolName = name;
if (argsText) toolInfo.toolArgs += argsText;
b.serverToolCalls.set(toolCallId, toolInfo);
if (toolInfo.toolName && !toolInfo.preToolUseTriggered) {
toolInfo.preToolUseTriggered = true;
let parsedArgs: Record<string, unknown> = {};
try {
if (toolInfo.toolArgs) {
parsedArgs = JSON.parse(toolInfo.toolArgs);
}
} catch {
// Args may be incomplete JSON
}
runPreToolUseHooks(
toolInfo.toolName,
parsedArgs,
toolCallId,
undefined,
b.agentId,
).catch(() => {});
}
}
break;
}
@@ -651,6 +703,35 @@ export function onChunk(b: Buffers, chunk: LettaStreamingResponse) {
resultOk: status === "success",
};
b.byId.set(id, updatedLine);
// Trigger PostToolUse hook for server-side tools (fire-and-forget)
if (toolCallId) {
const serverToolInfo = b.serverToolCalls.get(toolCallId);
if (serverToolInfo) {
let parsedArgs: Record<string, unknown> = {};
try {
if (serverToolInfo.toolArgs) {
parsedArgs = JSON.parse(serverToolInfo.toolArgs);
}
} catch {
// Args parsing failed
}
runPostToolUseHooks(
serverToolInfo.toolName,
parsedArgs,
{
status: status === "success" ? "success" : "error",
output: resultText,
},
toolCallId,
undefined,
b.agentId,
).catch(() => {});
b.serverToolCalls.delete(toolCallId);
}
}
}
break;
}