feat: implement client-side tools via client_tools spec (#456)

Co-authored-by: Letta <noreply@letta.com>
This commit is contained in:
Charles Packer
2026-01-02 23:35:40 -08:00
committed by GitHub
parent 5ad51d7095
commit 34367de5d7
14 changed files with 181 additions and 1157 deletions

View File

@@ -1,11 +1,10 @@
{
"lockfileVersion": 1,
"configVersion": 0,
"workspaces": {
"": {
"name": "@letta-ai/letta-code",
"dependencies": {
"@letta-ai/letta-client": "1.6.1",
"@letta-ai/letta-client": "1.6.3",
"glob": "^13.0.0",
"ink-link": "^5.0.0",
"open": "^10.2.0",
@@ -37,7 +36,7 @@
"@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.0", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA=="],
"@letta-ai/letta-client": ["@letta-ai/letta-client@1.6.1", "", {}, "sha512-kCRnEKpeTj3e1xqRd58xvoCp28p/wuJUptrIlJ8cT2GiYkrOESlKmp6lc3f246VusrowdGeB9hSXePXZgd7rAA=="],
"@letta-ai/letta-client": ["@letta-ai/letta-client@1.6.3", "", {}, "sha512-WlWONBU2t8z9MynyQqatT9rKQdaP7s+cWSb+3e+2gF79TZ/qZHf9k0QOUgDoZPoTRme+BifVqTAfVxlirPBd8w=="],
"@types/bun": ["@types/bun@1.3.1", "", { "dependencies": { "bun-types": "1.3.1" } }, "sha512-4jNMk2/K9YJtfqwoAa28c8wK+T7nvJFOjxI4h/7sORWcypRNxBpr+TPNaCfVWq70tLCJsqoFwcf0oI0JU/fvMQ=="],

View File

@@ -30,7 +30,7 @@
"access": "public"
},
"dependencies": {
"@letta-ai/letta-client": "1.6.1",
"@letta-ai/letta-client": "1.6.3",
"glob": "^13.0.0",
"ink-link": "^5.0.0",
"open": "^10.2.0"

View File

@@ -8,7 +8,6 @@ import type {
AgentType,
} from "@letta-ai/letta-client/resources/agents/agents";
import { DEFAULT_AGENT_NAME } from "../constants";
import { getToolNames } from "../tools/manager";
import { getClient } from "./client";
import { getDefaultMemoryBlocks } from "./memory";
import {
@@ -126,13 +125,11 @@ export async function createAgent(
const client = await getClient();
// Get loaded tool names (tools are already registered with Letta)
// Map internal names to server names so the agent sees the correct tool names
const { getServerToolName } = await import("../tools/manager");
const internalToolNames = getToolNames();
const serverToolNames = internalToolNames.map((n) => getServerToolName(n));
const baseMemoryTool = modelHandle.startsWith("openai/gpt-5")
// Only attach server-side tools to the agent.
// Client-side tools (Read, Write, Bash, etc.) are passed via client_tools at runtime,
// NOT attached to the agent. This is the new pattern - no more stub tool registration.
const { isOpenAIModel } = await import("../tools/manager");
const baseMemoryTool = isOpenAIModel(modelHandle)
? "memory_apply_patch"
: "memory";
const defaultBaseTools = options.baseTools ?? [
@@ -142,7 +139,7 @@ export async function createAgent(
"fetch_webpage",
];
let toolNames = [...serverToolNames, ...defaultBaseTools];
let toolNames = [...defaultBaseTools];
// Fallback: if server doesn't have memory_apply_patch, use legacy memory tool
if (toolNames.includes("memory_apply_patch")) {

View File

@@ -6,7 +6,7 @@ import { resolve } from "node:path";
import type { AgentState } from "@letta-ai/letta-client/resources/agents/agents";
import { getClient } from "./client";
import { getModelUpdateArgs } from "./model";
import { linkToolsToAgent, updateAgentLLMConfig } from "./modify";
import { updateAgentLLMConfig } from "./modify";
export interface ImportAgentOptions {
filePath: string;
@@ -45,11 +45,11 @@ export async function importAgentFromFile(
if (options.modelOverride) {
const updateArgs = getModelUpdateArgs(options.modelOverride);
await updateAgentLLMConfig(agentId, options.modelOverride, updateArgs);
// Ensure the correct memory tool is attached for the new model
const { ensureCorrectMemoryTool } = await import("../tools/toolset");
await ensureCorrectMemoryTool(agentId, options.modelOverride);
agent = await client.agents.retrieve(agentId);
}
// Link Letta Code tools to the imported agent
await linkToolsToAgent(agentId);
return { agent };
}

View File

@@ -8,6 +8,7 @@ import type {
ApprovalCreate,
LettaStreamingResponse,
} from "@letta-ai/letta-client/resources/agents/messages";
import { getClientToolsFromRegistry } from "../tools/manager";
import { getClient } from "./client";
export async function sendMessageStream(
@@ -25,5 +26,6 @@ export async function sendMessageStream(
streaming: true,
stream_tokens: opts.streamTokens ?? true,
background: opts.background ?? true,
client_tools: getClientToolsFromRegistry(),
});
}

View File

@@ -9,7 +9,6 @@ import type {
} from "@letta-ai/letta-client/resources/agents/agents";
import type { LlmConfig } from "@letta-ai/letta-client/resources/models/models";
import { ANTHROPIC_PROVIDER_NAME } from "../providers/anthropic-provider";
import { getAllLettaToolNames, getToolNames } from "../tools/manager";
import { getClient } from "./client";
type ModelSettings =
@@ -147,169 +146,6 @@ export async function updateAgentLLMConfig(
return finalAgent.llm_config;
}
export interface LinkResult {
success: boolean;
message: string;
addedCount?: number;
}
export interface UnlinkResult {
success: boolean;
message: string;
removedCount?: number;
}
/**
* Attach all Letta Code tools to an agent.
*
* @param agentId - The agent ID
* @returns Result with success status and message
*/
export async function linkToolsToAgent(agentId: string): Promise<LinkResult> {
try {
const client = await getClient();
// Get ALL agent tools from agent state
const agent = await client.agents.retrieve(agentId, {
include: ["agent.tools"],
});
const currentTools = agent.tools || [];
const currentToolIds = currentTools
.map((t) => t.id)
.filter((id): id is string => typeof id === "string");
const currentToolNames = new Set(
currentTools
.map((t) => t.name)
.filter((name): name is string => typeof name === "string"),
);
// Get Letta Code tool names (internal names from registry)
const { getServerToolName } = await import("../tools/manager");
const lettaCodeToolNames = getToolNames();
// Find tools to add (tools that aren't already attached)
// Compare using server names since that's what the agent has
const toolsToAdd = lettaCodeToolNames.filter((internalName) => {
const serverName = getServerToolName(internalName);
return !currentToolNames.has(serverName);
});
if (toolsToAdd.length === 0) {
return {
success: true,
message: "All Letta Code tools already attached",
addedCount: 0,
};
}
// Look up tool IDs in parallel (instead of sequential calls)
const toolsToAddIds = (
await Promise.all(
toolsToAdd.map(async (toolName) => {
const serverName = getServerToolName(toolName);
const toolsResponse = await client.tools.list({ name: serverName });
return toolsResponse.items[0]?.id;
}),
)
).filter((id): id is string => !!id);
// Combine current tools with new tools
const newToolIds = [...currentToolIds, ...toolsToAddIds];
// Get current tool_rules and add requires_approval rules for new tools
// ALL Letta Code tools need requires_approval to be routed to the client
const currentToolRules = agent.tool_rules || [];
const newToolRules = [
...currentToolRules,
...toolsToAdd.map((toolName) => ({
tool_name: getServerToolName(toolName),
type: "requires_approval" as const,
prompt_template: null,
})),
];
await client.agents.update(agentId, {
tool_ids: newToolIds,
tool_rules: newToolRules,
});
return {
success: true,
message: `Attached ${toolsToAddIds.length} Letta Code tool(s) to agent`,
addedCount: toolsToAddIds.length,
};
} catch (error) {
return {
success: false,
message: `Failed: ${error instanceof Error ? error.message : String(error)}`,
};
}
}
/**
* Remove all Letta Code tools from an agent.
*
* @param agentId - The agent ID
* @returns Result with success status and message
*/
export async function unlinkToolsFromAgent(
agentId: string,
): Promise<UnlinkResult> {
try {
const client = await getClient();
// Get ALL agent tools from agent state (not tools.list which may be incomplete)
const agent = await client.agents.retrieve(agentId, {
include: ["agent.tools"],
});
const allTools = agent.tools || [];
// Get all possible Letta Code tool names (both internal and server names)
const { getServerToolName } = await import("../tools/manager");
const lettaCodeToolNames = new Set(getAllLettaToolNames());
const lettaCodeServerNames = new Set(
Array.from(lettaCodeToolNames).map((name) => getServerToolName(name)),
);
// Filter out Letta Code tools, keep everything else
// Check against server names since that's what the agent sees
const remainingTools = allTools.filter(
(t) => t.name && !lettaCodeServerNames.has(t.name),
);
const removedCount = allTools.length - remainingTools.length;
// Extract IDs from remaining tools (filter out any undefined IDs)
const remainingToolIds = remainingTools
.map((t) => t.id)
.filter((id): id is string => typeof id === "string");
// Remove approval rules for Letta Code tools being unlinked
// Check against server names since that's what appears in tool_rules
const currentToolRules = agent.tool_rules || [];
const remainingToolRules = currentToolRules.filter(
(rule) =>
rule.type !== "requires_approval" ||
!lettaCodeServerNames.has(rule.tool_name),
);
await client.agents.update(agentId, {
tool_ids: remainingToolIds,
tool_rules: remainingToolRules,
});
return {
success: true,
message: `Removed ${removedCount} Letta Code tool(s) from agent`,
removedCount,
};
} catch (error) {
return {
success: false,
message: `Failed: ${error instanceof Error ? error.message : String(error)}`,
};
}
}
export interface SystemPromptUpdateResult {
success: boolean;
message: string;

View File

@@ -36,12 +36,14 @@ import { type PermissionMode, permissionMode } from "../permissions/mode";
import { updateProjectSettings } from "../settings";
import { settingsManager } from "../settings-manager";
import { telemetry } from "../telemetry";
import type { ToolExecutionResult } from "../tools/manager";
import {
analyzeToolApproval,
checkToolPermission,
executeTool,
isGeminiModel,
isOpenAIModel,
savePermissionRule,
type ToolExecutionResult,
} from "../tools/manager";
import {
handleMcpAdd,
@@ -393,8 +395,6 @@ export default function App({
agentState?: AgentState | null;
loadingState?:
| "assembling"
| "upserting"
| "updating_tools"
| "importing"
| "initializing"
| "checking"
@@ -1075,11 +1075,14 @@ export default function App({
setCurrentModelId(agentModelHandle || null);
}
// Detect current toolset from attached tools
const { detectToolsetFromAgent } = await import("../tools/toolset");
const detected = await detectToolsetFromAgent(client, agentId);
if (detected) {
setCurrentToolset(detected);
// Derive toolset from agent's model (not persisted, computed on resume)
if (agentModelHandle) {
const derivedToolset = isOpenAIModel(agentModelHandle)
? "codex"
: isGeminiModel(agentModelHandle)
? "gemini"
: "default";
setCurrentToolset(derivedToolset);
}
} catch (error) {
console.error("Error fetching agent config:", error);
@@ -3346,96 +3349,6 @@ export default function App({
return { submitted: true };
}
// Special handling for /link command - attach all Letta Code tools (deprecated)
if (msg.trim() === "/link" || msg.trim().startsWith("/link ")) {
const cmdId = uid("cmd");
buffersRef.current.byId.set(cmdId, {
kind: "command",
id: cmdId,
input: msg,
output: "Attaching Letta Code tools...",
phase: "running",
});
buffersRef.current.order.push(cmdId);
refreshDerived();
setCommandRunning(true);
try {
const { linkToolsToAgent } = await import("../agent/modify");
const result = await linkToolsToAgent(agentId);
buffersRef.current.byId.set(cmdId, {
kind: "command",
id: cmdId,
input: msg,
output: result.message,
phase: "finished",
success: result.success,
});
refreshDerived();
} catch (error) {
const errorDetails = formatErrorDetails(error, agentId);
buffersRef.current.byId.set(cmdId, {
kind: "command",
id: cmdId,
input: msg,
output: `Failed to link tools: ${errorDetails}`,
phase: "finished",
success: false,
});
refreshDerived();
} finally {
setCommandRunning(false);
}
return { submitted: true };
}
// Special handling for /unlink command - remove all Letta Code tools (deprecated)
if (msg.trim() === "/unlink" || msg.trim().startsWith("/unlink ")) {
const cmdId = uid("cmd");
buffersRef.current.byId.set(cmdId, {
kind: "command",
id: cmdId,
input: msg,
output: "Removing Letta Code tools...",
phase: "running",
});
buffersRef.current.order.push(cmdId);
refreshDerived();
setCommandRunning(true);
try {
const { unlinkToolsFromAgent } = await import("../agent/modify");
const result = await unlinkToolsFromAgent(agentId);
buffersRef.current.byId.set(cmdId, {
kind: "command",
id: cmdId,
input: msg,
output: result.message,
phase: "finished",
success: result.success,
});
refreshDerived();
} catch (error) {
const errorDetails = formatErrorDetails(error, agentId);
buffersRef.current.byId.set(cmdId, {
kind: "command",
id: cmdId,
input: msg,
output: `Failed to unlink tools: ${errorDetails}`,
phase: "finished",
success: false,
});
refreshDerived();
} finally {
setCommandRunning(false);
}
return { submitted: true };
}
// Special handling for /bg command - show background shell processes
if (msg.trim() === "/bg") {
const { backgroundProcesses } = await import(

View File

@@ -48,8 +48,6 @@ async function getAuthMethod(): Promise<"url" | "api-key" | "oauth"> {
type LoadingState =
| "assembling"
| "upserting"
| "updating_tools"
| "importing"
| "initializing"
| "checking"
@@ -144,10 +142,6 @@ function getLoadingMessage(
return continueSession ? "Resuming agent..." : "Creating agent...";
case "assembling":
return "Assembling tools...";
case "upserting":
return "Upserting tools...";
case "updating_tools":
return "Updating tools...";
case "importing":
return "Importing agent...";
case "checking":

View File

@@ -31,11 +31,7 @@ import { formatErrorDetails } from "./cli/helpers/errorFormatter";
import { safeJsonParseOr } from "./cli/helpers/safeJsonParse";
import { drainStreamWithResume } from "./cli/helpers/stream";
import { settingsManager } from "./settings-manager";
import {
checkToolPermission,
forceUpsertTools,
isToolsNotFoundError,
} from "./tools/manager";
import { checkToolPermission } from "./tools/manager";
import type {
AutoApprovalMessage,
CanUseToolControlRequest,
@@ -93,8 +89,6 @@ export async function handleHeadlessCommand(
"permission-mode": { type: "string" },
yolo: { type: "boolean" },
skills: { type: "string" },
link: { type: "boolean" },
unlink: { type: "boolean" },
sleeptime: { type: "boolean" },
"init-blocks": { type: "string" },
"base-tools": { type: "string" },
@@ -162,12 +156,6 @@ export async function handleHeadlessCommand(
const client = await getClient();
// Get base URL for tool upsert operations
const baseURL =
process.env.LETTA_BASE_URL ||
settings.env?.LETTA_BASE_URL ||
"https://api.letta.com";
// Resolve agent (same logic as interactive mode)
let agent: AgentState | null = null;
const specifiedAgentId = values.agent as string | undefined;
@@ -354,19 +342,8 @@ export async function handleHeadlessCommand(
memoryBlocks,
blockValues,
};
try {
const result = await createAgent(createOptions);
agent = result.agent;
} catch (err) {
if (isToolsNotFoundError(err)) {
console.warn("Tools missing on server, re-uploading and retrying...");
await forceUpsertTools(client, baseURL);
const result = await createAgent(createOptions);
agent = result.agent;
} else {
throw err;
}
}
const result = await createAgent(createOptions);
agent = result.agent;
}
// Priority 4: Try to resume from project settings (.letta/settings.local.json)
@@ -407,19 +384,8 @@ export async function handleHeadlessCommand(
systemPromptPreset,
// Note: systemCustom, systemAppend, and memoryBlocks only apply with --new flag
};
try {
const result = await createAgent(createOptions);
agent = result.agent;
} catch (err) {
if (isToolsNotFoundError(err)) {
console.warn("Tools missing on server, re-uploading and retrying...");
await forceUpsertTools(client, baseURL);
const result = await createAgent(createOptions);
agent = result.agent;
} else {
throw err;
}
}
const result = await createAgent(createOptions);
agent = result.agent;
}
// Check if we're resuming an existing agent (not creating a new one)

View File

@@ -12,12 +12,7 @@ import { ProfileSelectionInline } from "./cli/profile-selection";
import { permissionMode } from "./permissions/mode";
import { settingsManager } from "./settings-manager";
import { telemetry } from "./telemetry";
import {
forceUpsertTools,
isToolsNotFoundError,
loadTools,
upsertToolsIfNeeded,
} from "./tools/manager";
import { loadTools } from "./tools/manager";
// Stable empty array constants to prevent new references on every render
// These are used as fallbacks when resumeData is null, avoiding the React
@@ -348,8 +343,6 @@ async function main(): Promise<void> {
"input-format": { type: "string" },
"include-partial-messages": { type: "boolean" },
skills: { type: "string" },
link: { type: "boolean" },
unlink: { type: "boolean" },
sleeptime: { type: "boolean" },
"from-af": { type: "string" },
},
@@ -735,22 +728,6 @@ async function main(): Promise<void> {
}
}
// Handle --link and --unlink flags (modify tools before starting session)
const shouldLink = values.link as boolean | undefined;
const shouldUnlink = values.unlink as boolean | undefined;
// Validate --link/--unlink flags require --agent
// Validate --link/--unlink flags require --agent
if (shouldLink || shouldUnlink) {
if (!specifiedAgentId) {
console.error(
`Error: --${shouldLink ? "link" : "unlink"} requires --agent <id>`,
);
process.exit(1);
}
// Implementation is in InteractiveSession init()
}
if (isHeadless) {
// For headless mode, load tools synchronously (respecting model/toolset when provided)
const modelForTools = getModelForToolLoading(
@@ -758,8 +735,6 @@ async function main(): Promise<void> {
specifiedToolset as "codex" | "default" | undefined,
);
await loadTools(modelForTools);
const client = await getClient();
await upsertToolsIfNeeded(client, baseURL);
const { handleHeadlessCommand } = await import("./headless");
await handleHeadlessCommand(process.argv, specifiedModel, skillsDirectory);
@@ -814,8 +789,6 @@ async function main(): Promise<void> {
| "selecting"
| "selecting_global"
| "assembling"
| "upserting"
| "updating_tools"
| "importing"
| "initializing"
| "checking"
@@ -991,59 +964,14 @@ async function main(): Promise<void> {
// Set resuming state early so loading messages are accurate
setIsResumingSession(!!resumingAgentId);
// If resuming an existing agent, load the exact tools attached to it
// Otherwise, load a full toolset based on model/toolset preference
if (resumingAgentId && !toolset) {
try {
const { getAttachedLettaTools } = await import("./tools/toolset");
const { loadSpecificTools } = await import("./tools/manager");
const attachedTools = await getAttachedLettaTools(
client,
resumingAgentId,
);
if (attachedTools.length > 0) {
// Load only the specific tools attached to this agent
await loadSpecificTools(attachedTools);
} else {
// No Letta Code tools attached, load default based on model
const modelForTools = getModelForToolLoading(model, undefined);
await loadTools(modelForTools);
}
} catch {
// Detection failed, use model-based default
const modelForTools = getModelForToolLoading(model, undefined);
await loadTools(modelForTools);
}
} else {
// Creating new agent or explicit toolset specified - load full toolset
const modelForTools = getModelForToolLoading(model, toolset);
await loadTools(modelForTools);
}
setLoadingState("upserting");
await upsertToolsIfNeeded(client, baseURL);
// Handle --link/--unlink after upserting tools
if (shouldLink || shouldUnlink) {
if (!agentIdArg) {
console.error("Error: --link/--unlink requires --agent <id>");
process.exit(1);
}
setLoadingState("updating_tools");
const { linkToolsToAgent, unlinkToolsFromAgent } = await import(
"./agent/modify"
);
const result = shouldLink
? await linkToolsToAgent(agentIdArg)
: await unlinkToolsFromAgent(agentIdArg);
if (!result.success) {
console.error(`${result.message}`);
process.exit(1);
}
}
// Load toolset: use explicit --toolset flag if provided, otherwise derive from model
// NOTE: We don't persist toolset per-agent. On resume, toolset is re-derived from model.
// If explicit toolset overrides need to persist, see comment in tools/toolset.ts
const modelForTools = getModelForToolLoading(
model,
toolset as "codex" | "default" | undefined,
);
await loadTools(modelForTools);
setLoadingState("initializing");
const { createAgent } = await import("./agent/create");
@@ -1104,47 +1032,20 @@ async function main(): Promise<void> {
// Priority 3: Check if --new flag was passed - create new agent
if (!agent && forceNew) {
const updateArgs = getModelUpdateArgs(model);
try {
const result = await createAgent(
undefined,
model,
undefined,
updateArgs,
skillsDirectory,
true, // parallelToolCalls always enabled
sleeptimeFlag ?? settings.enableSleeptime,
systemPromptPreset,
initBlocks,
baseTools,
);
agent = result.agent;
setAgentProvenance(result.provenance);
} catch (err) {
// Check if tools are missing on server (stale hash cache)
if (isToolsNotFoundError(err)) {
console.warn(
"Tools missing on server, re-uploading and retrying...",
);
await forceUpsertTools(client, baseURL);
// Retry agent creation
const result = await createAgent(
undefined,
model,
undefined,
updateArgs,
skillsDirectory,
true,
sleeptimeFlag ?? settings.enableSleeptime,
systemPromptPreset,
initBlocks,
baseTools,
);
agent = result.agent;
setAgentProvenance(result.provenance);
} else {
throw err;
}
}
const result = await createAgent(
undefined,
model,
undefined,
updateArgs,
skillsDirectory,
true, // parallelToolCalls always enabled
sleeptimeFlag ?? settings.enableSleeptime,
systemPromptPreset,
initBlocks,
baseTools,
);
agent = result.agent;
setAgentProvenance(result.provenance);
}
// Priority 4: Try to resume from project settings LRU (.letta/settings.local.json)
@@ -1181,47 +1082,20 @@ async function main(): Promise<void> {
// Priority 7: Create a new agent
if (!agent) {
const updateArgs = getModelUpdateArgs(model);
try {
const result = await createAgent(
undefined,
model,
undefined,
updateArgs,
skillsDirectory,
true, // parallelToolCalls always enabled
sleeptimeFlag ?? settings.enableSleeptime,
systemPromptPreset,
undefined,
undefined,
);
agent = result.agent;
setAgentProvenance(result.provenance);
} catch (err) {
// Check if tools are missing on server (stale hash cache)
if (isToolsNotFoundError(err)) {
console.warn(
"Tools missing on server, re-uploading and retrying...",
);
await forceUpsertTools(client, baseURL);
// Retry agent creation
const result = await createAgent(
undefined,
model,
undefined,
updateArgs,
skillsDirectory,
true,
sleeptimeFlag ?? settings.enableSleeptime,
systemPromptPreset,
undefined,
undefined,
);
agent = result.agent;
setAgentProvenance(result.provenance);
} else {
throw err;
}
}
const result = await createAgent(
undefined,
model,
undefined,
updateArgs,
skillsDirectory,
true, // parallelToolCalls always enabled
sleeptimeFlag ?? settings.enableSleeptime,
systemPromptPreset,
undefined,
undefined,
);
agent = result.agent;
setAgentProvenance(result.provenance);
}
// Ensure local project settings are loaded before updating

View File

@@ -29,8 +29,6 @@ export interface Settings {
refreshToken?: string; // DEPRECATED: kept for migration, now stored in secrets
tokenExpiresAt?: number; // Unix timestamp in milliseconds
deviceId?: string;
// Tool upsert cache: maps serverUrl -> hash of upserted tools
toolUpsertHashes?: Record<string, string>;
// Anthropic OAuth
anthropicOAuth?: {
access_token: string;

View File

@@ -1,222 +0,0 @@
import { afterAll, beforeAll, describe, expect, test } from "bun:test";
import { Letta } from "@letta-ai/letta-client";
import { linkToolsToAgent, unlinkToolsFromAgent } from "../../agent/modify";
import { settingsManager } from "../../settings-manager";
import { getToolNames, loadTools } from "../../tools/manager";
// Skip these integration tests if LETTA_API_KEY is not set
const shouldSkip = !process.env.LETTA_API_KEY;
const describeOrSkip = shouldSkip ? describe.skip : describe;
describeOrSkip("Link/Unlink Tools", () => {
let client: Letta;
let testAgentId: string;
beforeAll(async () => {
// Initialize settings and load tools
await settingsManager.initialize();
await loadTools();
// Create a test agent
const apiKey = process.env.LETTA_API_KEY;
if (!apiKey) {
throw new Error("LETTA_API_KEY required for tests");
}
client = new Letta({
apiKey,
defaultHeaders: { "X-Letta-Source": "letta-code" },
});
const agent = await client.agents.create({
model: "openai/gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
memory_blocks: [
{ label: "human", value: "Test user" },
{ label: "persona", value: "Test agent" },
],
tools: [],
});
testAgentId = agent.id;
});
afterAll(async () => {
// Cleanup: delete test agent
if (testAgentId) {
try {
await client.agents.delete(testAgentId);
} catch (_error) {
// Ignore cleanup errors
}
}
});
test("linkToolsToAgent attaches all Letta Code tools", async () => {
// Reset: ensure tools are not already attached
await unlinkToolsFromAgent(testAgentId);
const result = await linkToolsToAgent(testAgentId);
expect(result.success).toBe(true);
expect(result.addedCount).toBeGreaterThan(0);
// Verify tools were attached
const agent = await client.agents.retrieve(testAgentId, {
include: ["agent.tools"],
});
const toolNames = agent.tools?.map((t) => t.name) || [];
const lettaCodeTools = getToolNames();
for (const toolName of lettaCodeTools) {
expect(toolNames).toContain(toolName);
}
}, 30000);
test("linkToolsToAgent adds approval rules for all tools", async () => {
// First unlink to reset
await unlinkToolsFromAgent(testAgentId);
// Link tools
await linkToolsToAgent(testAgentId);
// Verify approval rules were added
const agent = await client.agents.retrieve(testAgentId, {
include: ["agent.tools"],
});
const approvalRules = agent.tool_rules?.filter(
(rule) => rule.type === "requires_approval",
);
const lettaCodeTools = getToolNames();
expect(approvalRules?.length).toBe(lettaCodeTools.length);
// Check all Letta Code tools have approval rules
const rulesToolNames = approvalRules?.map((r) => r.tool_name) || [];
for (const toolName of lettaCodeTools) {
expect(rulesToolNames).toContain(toolName);
}
}, 30000);
test("linkToolsToAgent returns success when tools already attached", async () => {
// Reset and link once
await unlinkToolsFromAgent(testAgentId);
await linkToolsToAgent(testAgentId);
// Link again
const result = await linkToolsToAgent(testAgentId);
expect(result.success).toBe(true);
expect(result.addedCount).toBe(0);
expect(result.message).toContain("already attached");
}, 30000);
test("unlinkToolsFromAgent removes all Letta Code tools", async () => {
// First link tools
await linkToolsToAgent(testAgentId);
// Then unlink
const result = await unlinkToolsFromAgent(testAgentId);
expect(result.success).toBe(true);
expect(result.removedCount).toBeGreaterThan(0);
// Verify tools were removed
const agent = await client.agents.retrieve(testAgentId, {
include: ["agent.tools"],
});
const toolNames = agent.tools?.map((t) => t.name) || [];
const lettaCodeTools = getToolNames();
for (const toolName of lettaCodeTools) {
expect(toolNames).not.toContain(toolName);
}
}, 30000);
test("unlinkToolsFromAgent removes approval rules", async () => {
// First link tools
await linkToolsToAgent(testAgentId);
// Then unlink
await unlinkToolsFromAgent(testAgentId);
// Verify approval rules were removed
const agent = await client.agents.retrieve(testAgentId, {
include: ["agent.tools"],
});
const approvalRules = agent.tool_rules?.filter(
(rule) => rule.type === "requires_approval",
);
const lettaCodeTools = new Set(getToolNames());
const remainingApprovalRules = approvalRules?.filter((r) =>
lettaCodeTools.has(r.tool_name),
);
expect(remainingApprovalRules?.length || 0).toBe(0);
}, 30000);
test("unlinkToolsFromAgent preserves non-Letta-Code tools", async () => {
// Link Letta Code tools
await linkToolsToAgent(testAgentId);
// Attach memory tool
const memoryToolsResponse = await client.tools.list({ name: "memory" });
const memoryTool = memoryToolsResponse.items[0];
if (memoryTool?.id) {
await client.agents.tools.attach(memoryTool.id, {
agent_id: testAgentId,
});
}
// Unlink Letta Code tools
await unlinkToolsFromAgent(testAgentId);
// Verify memory tool is still there
const agent = await client.agents.retrieve(testAgentId, {
include: ["agent.tools"],
});
const toolNames = agent.tools?.map((t) => t.name) || [];
expect(toolNames).toContain("memory");
// Verify Letta Code tools are gone
const lettaCodeTools = getToolNames();
for (const toolName of lettaCodeTools) {
expect(toolNames).not.toContain(toolName);
}
}, 30000);
test("unlinkToolsFromAgent preserves non-approval tool_rules", async () => {
// Link tools
await linkToolsToAgent(testAgentId);
// Add a continue_loop rule manually
const agent = await client.agents.retrieve(testAgentId, {
include: ["agent.tools"],
});
const newToolRules = [
...(agent.tool_rules || []),
{
tool_name: "memory",
type: "continue_loop" as const,
prompt_template: "Test rule",
},
];
await client.agents.update(testAgentId, { tool_rules: newToolRules });
// Unlink Letta Code tools
await unlinkToolsFromAgent(testAgentId);
// Verify continue_loop rule is still there
const updatedAgent = await client.agents.retrieve(testAgentId, {
include: ["agent.tools"],
});
const continueLoopRules = updatedAgent.tool_rules?.filter(
(r) => r.type === "continue_loop" && r.tool_name === "memory",
);
expect(continueLoopRules?.length).toBe(1);
}, 30000);
});

View File

@@ -1,9 +1,3 @@
import { createHash } from "node:crypto";
import type Letta from "@letta-ai/letta-client";
import {
AuthenticationError,
PermissionDeniedError,
} from "@letta-ai/letta-client";
import { getModelInfo } from "../agent/model";
import { getAllSubagentConfigs } from "../agent/subagents";
import { INTERRUPTED_BY_USER } from "../constants";
@@ -255,33 +249,28 @@ function resolveInternalToolName(name: string): string | undefined {
}
/**
* Generates a Python stub for a tool that will be executed client-side.
* This is registered with Letta so the agent knows about the tool.
* ClientTool interface matching the Letta SDK's expected format.
* Used when passing client-side tools via the client_tools field.
*/
function generatePythonStub(
name: string,
_description: string,
schema: JsonSchema,
): string {
const params = (schema.properties ?? {}) as Record<string, JsonSchema>;
const required = schema.required ?? [];
export interface ClientTool {
name: string;
description?: string | null;
parameters?: { [key: string]: unknown } | null;
}
// Split parameters into required and optional
const allKeys = Object.keys(params);
const requiredParams = allKeys.filter((key) => required.includes(key));
const optionalParams = allKeys.filter((key) => !required.includes(key));
// Generate function parameters: required first, then optional with defaults
const paramList = [
...requiredParams,
...optionalParams.map((key) => `${key}=None`),
].join(", ");
return `def ${name}(${paramList}):
"""Stub method. This tool is executed client-side via the approval flow.
"""
raise Exception("This is a stub tool. Execution should happen on client.")
`;
/**
* Get all loaded tools in the format expected by the Letta API's client_tools field.
* Maps internal tool names to server-facing names for proper tool invocation.
*/
export function getClientToolsFromRegistry(): ClientTool[] {
return Array.from(toolRegistry.entries()).map(([name, tool]) => {
const serverName = getServerToolName(name);
return {
name: serverName,
description: tool.schema.description,
parameters: tool.schema.input_schema,
};
});
}
/**
@@ -554,203 +543,6 @@ function injectSubagentsIntoTaskDescription(
return `${before}## Available Agents\n\n${agentsSection}\n\n${after}`;
}
/**
 * Upserts all loaded tools to the Letta server with retry logic.
 * This registers Python stubs so the agent knows about the tools,
 * while actual execution happens client-side via the approval flow.
 *
 * Implements resilient retry logic:
 * - Retries if a single upsert attempt exceeds the per-attempt timeout
 * - Keeps retrying up to 30 seconds total
 * - Uses exponential backoff between retries
 * - Fails immediately (no retry) on authentication/permission errors
 *
 * @param client - Letta client instance
 * @returns Promise that resolves when all tools are registered
 */
export async function upsertToolsToServer(client: Letta): Promise<void> {
  const OPERATION_TIMEOUT = 20000; // 20 seconds per attempt
  const MAX_TOTAL_TIME = 30000; // 30 seconds total budget
  const startTime = Date.now();

  async function attemptUpsert(retryCount: number = 0): Promise<void> {
    const attemptStartTime = Date.now();

    // Check if we've exceeded total time budget
    if (Date.now() - startTime > MAX_TOTAL_TIME) {
      throw new Error(
        "Tool upserting exceeded maximum time limit (30s). Please check your network connection and try again.",
      );
    }

    // Track the per-attempt timer so it can be cancelled once the race
    // settles. Without this the pending setTimeout fires after a successful
    // upsert, rejecting a promise nobody awaits (unhandled rejection) and
    // keeping the event loop alive for up to OPERATION_TIMEOUT ms.
    let timeoutHandle: ReturnType<typeof setTimeout> | undefined;

    try {
      // Create a timeout promise
      const timeoutPromise = new Promise<never>((_, reject) => {
        timeoutHandle = setTimeout(() => {
          reject(
            new Error(
              `Tool upsert operation timed out (${OPERATION_TIMEOUT / 1000}s)`,
            ),
          );
        }, OPERATION_TIMEOUT);
      });

      // Upsert every registered tool in parallel.
      const upsertPromise = Promise.all(
        Array.from(toolRegistry.entries()).map(async ([name, tool]) => {
          // Get the server-facing tool name (may differ from internal name)
          const serverName = TOOL_NAME_MAPPINGS[name as ToolName] || name;
          const pythonStub = generatePythonStub(
            serverName,
            tool.schema.description,
            tool.schema.input_schema,
          );
          // Construct the full JSON schema in Letta's expected format
          const fullJsonSchema = {
            name: serverName,
            description: tool.schema.description,
            parameters: tool.schema.input_schema,
          };
          await client.tools.upsert({
            default_requires_approval: true,
            source_code: pythonStub,
            json_schema: fullJsonSchema,
          });
        }),
      );

      // If the timeout wins the race, upsertPromise may still reject later;
      // swallow that late rejection so it is never unhandled.
      upsertPromise.catch(() => {});

      // Race the upsert against the timeout
      await Promise.race([upsertPromise, timeoutPromise]);

      // Success! Operation completed within timeout
      return;
    } catch (error) {
      const elapsed = Date.now() - attemptStartTime;
      const totalElapsed = Date.now() - startTime;

      // Auth errors are not transient — fail immediately without retrying
      if (
        error instanceof AuthenticationError ||
        error instanceof PermissionDeniedError
      ) {
        throw new Error(
          `Authentication failed. Please check your LETTA_API_KEY.\n` +
            `Run 'rm ~/.letta/settings.json' and restart to re-authenticate.\n` +
            `Original error: ${error.message}`,
        );
      }

      // If we still have time, retry with exponential backoff
      if (totalElapsed < MAX_TOTAL_TIME) {
        const backoffDelay = Math.min(1000 * 2 ** retryCount, 5000); // Max 5s backoff
        const remainingTime = MAX_TOTAL_TIME - totalElapsed;
        console.error(
          `Tool upsert attempt ${retryCount + 1} failed after ${elapsed}ms. Retrying in ${backoffDelay}ms... (${Math.round(remainingTime / 1000)}s remaining)`,
        );
        console.error(
          `Error: ${error instanceof Error ? error.message : String(error)}`,
        );
        await new Promise((resolve) => setTimeout(resolve, backoffDelay));
        return attemptUpsert(retryCount + 1);
      }

      // Out of time, throw the error
      throw error;
    } finally {
      // Always cancel the per-attempt timer so a stale timeout cannot fire
      // after this attempt has settled (success, failure, or retry).
      if (timeoutHandle !== undefined) clearTimeout(timeoutHandle);
    }
  }

  await attemptUpsert();
}
/**
 * Build a short, deterministic hash over every currently loaded tool,
 * used for cache invalidation. Internal names, server-facing names, and
 * full schemas are all included so any change is detected.
 */
export function computeToolsHash(): string {
  const entries = [...toolRegistry.entries()];
  entries.sort(([a], [b]) => a.localeCompare(b)); // deterministic order
  const toolData = entries.map(([name, tool]) => ({
    name,
    serverName: getServerToolName(name),
    schema: tool.schema,
  }));
  const digest = createHash("sha256")
    .update(JSON.stringify(toolData))
    .digest("hex");
  return digest.slice(0, 16); // short hash is sufficient
}
/**
 * Upsert tools to the server only when the tool definitions differ from the
 * last successful upsert. A per-server hash cached in settings lets redundant
 * upserts be skipped entirely.
 *
 * @param client - Letta client instance
 * @param serverUrl - The server URL (used as cache key)
 * @returns true if an upsert was performed, false if it was skipped
 */
export async function upsertToolsIfNeeded(
  client: Letta,
  serverUrl: string,
): Promise<boolean> {
  const { settingsManager } = await import("../settings-manager");
  const cached = settingsManager.getSetting("toolUpsertHashes") || {};
  const currentHash = computeToolsHash();

  // Hash match means this server already has the current tool set.
  if (cached[serverUrl] === currentHash) {
    return false;
  }

  await upsertToolsToServer(client);

  // Record the new hash so the next call can skip.
  settingsManager.updateSettings({
    toolUpsertHashes: { ...cached, [serverUrl]: currentHash },
  });
  return true;
}
/**
 * Force an upsert of all tools by discarding the cached hash for the given
 * server. Use this when tools are missing on the server even though the
 * local hash says they are present.
 *
 * @param client - Letta client instance
 * @param serverUrl - The server URL (used as cache key)
 */
export async function forceUpsertTools(
  client: Letta,
  serverUrl: string,
): Promise<void> {
  const { settingsManager } = await import("../settings-manager");
  const hashes = settingsManager.getSetting("toolUpsertHashes") || {};

  // Dropping this server's entry guarantees the next check misses the cache.
  delete hashes[serverUrl];
  settingsManager.updateSettings({ toolUpsertHashes: hashes });

  // Will always perform the upsert since the hash was just cleared.
  await upsertToolsIfNeeded(client, serverUrl);
}
/**
 * Check whether an error indicates that tools are missing on the server.
 * This can happen when the local hash cache is stale (tools were deleted
 * server-side).
 */
export function isToolsNotFoundError(error: unknown): boolean {
  // Guard clauses: only object-like values carrying a `message` qualify.
  if (typeof error !== "object" || error === null) {
    return false;
  }
  if (!("message" in error)) {
    return false;
  }
  const message = String((error as { message: unknown }).message);
  return message.includes("Tools not found by name");
}
/**
* Helper to clip tool return text to a reasonable display size
* Used by UI components to truncate long responses for display

View File

@@ -1,28 +1,19 @@
import type Letta from "@letta-ai/letta-client";
import { getClient, getServerUrl } from "../agent/client";
import { getClient } from "../agent/client";
import { resolveModel } from "../agent/model";
import { linkToolsToAgent, unlinkToolsFromAgent } from "../agent/modify";
import { toolFilter } from "./filter";
import {
ANTHROPIC_DEFAULT_TOOLS,
clearTools,
GEMINI_DEFAULT_TOOLS,
GEMINI_PASCAL_TOOLS,
getToolNames,
isOpenAIModel,
loadSpecificTools,
loadTools,
OPENAI_DEFAULT_TOOLS,
OPENAI_PASCAL_TOOLS,
upsertToolsIfNeeded,
} from "./manager";
// Use the same toolset definitions from manager.ts (single source of truth)
const ANTHROPIC_TOOLS = ANTHROPIC_DEFAULT_TOOLS;
// Toolset definitions from manager.ts (single source of truth)
const CODEX_TOOLS = OPENAI_PASCAL_TOOLS;
const CODEX_SNAKE_TOOLS = OPENAI_DEFAULT_TOOLS;
const GEMINI_TOOLS = GEMINI_PASCAL_TOOLS;
const GEMINI_SNAKE_TOOLS = GEMINI_DEFAULT_TOOLS;
// Toolset type including snake_case variants
export type ToolsetName =
@@ -33,88 +24,83 @@ export type ToolsetName =
| "gemini_snake"
| "none";
// Server-side/base tools that should stay attached regardless of Letta toolset
export const BASE_TOOL_NAMES = ["memory", "web_search"];
/**
* Gets the list of Letta Code tools currently attached to an agent.
* Returns the tool names that are both attached to the agent AND in our tool definitions.
* Ensures the correct memory tool is attached to the agent based on the model.
* - OpenAI/Codex models use memory_apply_patch
* - Claude/Gemini models use memory
*
* This is a server-side tool swap - client tools are passed via client_tools per-request.
*
* @param agentId - The agent ID to update
* @param modelIdentifier - Model handle to determine which memory tool to use
* @param useMemoryPatch - Optional override: true = use memory_apply_patch, false = use memory
*/
export async function getAttachedLettaTools(
client: Letta,
export async function ensureCorrectMemoryTool(
agentId: string,
): Promise<string[]> {
const agent = await client.agents.retrieve(agentId, {
include: ["agent.tools"],
});
modelIdentifier: string,
useMemoryPatch?: boolean,
): Promise<void> {
const resolvedModel = resolveModel(modelIdentifier) ?? modelIdentifier;
const client = await getClient();
const shouldUsePatch =
useMemoryPatch !== undefined
? useMemoryPatch
: isOpenAIModel(resolvedModel);
const toolNames =
agent.tools
?.map((t) => t.name)
.filter((name): name is string => typeof name === "string") || [];
try {
const agentWithTools = await client.agents.retrieve(agentId, {
include: ["agent.tools"],
});
const currentTools = agentWithTools.tools || [];
const mapByName = new Map(currentTools.map((t) => [t.name, t.id]));
// Get all possible Letta Code tool names
const allLettaTools: string[] = [
...CODEX_TOOLS,
...CODEX_SNAKE_TOOLS,
...ANTHROPIC_TOOLS,
...GEMINI_TOOLS,
...GEMINI_SNAKE_TOOLS,
];
// Determine which memory tool we want
// Only OpenAI (Codex) uses memory_apply_patch; Claude and Gemini use memory
const desiredMemoryTool = shouldUsePatch ? "memory_apply_patch" : "memory";
const otherMemoryTool =
desiredMemoryTool === "memory" ? "memory_apply_patch" : "memory";
// Return intersection: tools that are both attached AND in our definitions
return toolNames.filter((name) => allLettaTools.includes(name));
}
// Ensure desired memory tool attached
let desiredId = mapByName.get(desiredMemoryTool);
if (!desiredId) {
const resp = await client.tools.list({ name: desiredMemoryTool });
desiredId = resp.items[0]?.id;
}
if (!desiredId) {
// No warning needed - the tool might not exist on this server
return;
}
/**
* Detects which toolset is attached to an agent by examining its tools.
* Returns the toolset name based on majority, or null if no Letta Code tools.
*/
export async function detectToolsetFromAgent(
client: Letta,
agentId: string,
): Promise<ToolsetName | null> {
const attachedTools = await getAttachedLettaTools(client, agentId);
const otherId = mapByName.get(otherMemoryTool);
if (attachedTools.length === 0) {
return null;
// Check if swap is needed
if (mapByName.has(desiredMemoryTool) && !otherId) {
// Already has the right tool, no swap needed
return;
}
const currentIds = currentTools
.map((t) => t.id)
.filter((id): id is string => typeof id === "string");
const newIds = new Set(currentIds);
if (otherId) newIds.delete(otherId);
newIds.add(desiredId);
const updatedRules = (agentWithTools.tool_rules || []).map((r) =>
r.tool_name === otherMemoryTool
? { ...r, tool_name: desiredMemoryTool }
: r,
);
await client.agents.update(agentId, {
tool_ids: Array.from(newIds),
tool_rules: updatedRules,
});
} catch (err) {
console.warn(
`Warning: Failed to sync memory tool: ${err instanceof Error ? err.message : String(err)}`,
);
}
const codexToolNames: string[] = [...CODEX_TOOLS];
const codexSnakeToolNames: string[] = [...CODEX_SNAKE_TOOLS];
const anthropicToolNames: string[] = [...ANTHROPIC_TOOLS];
const geminiToolNames: string[] = [...GEMINI_TOOLS];
const geminiSnakeToolNames: string[] = [...GEMINI_SNAKE_TOOLS];
const codexCount = attachedTools.filter((name) =>
codexToolNames.includes(name),
).length;
const codexSnakeCount = attachedTools.filter((name) =>
codexSnakeToolNames.includes(name),
).length;
const anthropicCount = attachedTools.filter((name) =>
anthropicToolNames.includes(name),
).length;
const geminiCount = attachedTools.filter((name) =>
geminiToolNames.includes(name),
).length;
const geminiSnakeCount = attachedTools.filter((name) =>
geminiSnakeToolNames.includes(name),
).length;
// Return whichever has the most tools attached
const max = Math.max(
codexCount,
codexSnakeCount,
anthropicCount,
geminiCount,
geminiSnakeCount,
);
if (geminiSnakeCount === max) return "gemini_snake";
if (geminiCount === max) return "gemini";
if (codexSnakeCount === max) return "codex_snake";
if (codexCount === max) return "codex";
return "default";
}
/**
@@ -131,94 +117,43 @@ export async function forceToolsetSwitch(
clearTools();
// Load the appropriate toolset
// Map toolset name to a model identifier for loading
let modelForLoading: string;
if (toolsetName === "none") {
// Just clear tools
clearTools();
// Just clear tools, no loading needed
return;
} else if (toolsetName === "codex") {
await loadSpecificTools([...CODEX_TOOLS]);
modelForLoading = "openai/gpt-4";
} else if (toolsetName === "codex_snake") {
await loadTools("openai/gpt-4");
modelForLoading = "openai/gpt-4";
} else if (toolsetName === "gemini") {
await loadSpecificTools([...GEMINI_TOOLS]);
modelForLoading = "google_ai/gemini-3-pro-preview";
} else if (toolsetName === "gemini_snake") {
await loadTools("google_ai/gemini-3-pro-preview");
modelForLoading = "google_ai/gemini-3-pro-preview";
} else {
await loadTools("anthropic/claude-sonnet-4");
modelForLoading = "anthropic/claude-sonnet-4";
}
// Upsert the new toolset to server (with hash-based caching)
const client = await getClient();
const serverUrl = getServerUrl();
await upsertToolsIfNeeded(client, serverUrl);
// Ensure base memory tool is correct for the toolset
// Codex uses memory_apply_patch; Claude and Gemini use memory
const useMemoryPatch =
toolsetName === "codex" || toolsetName === "codex_snake";
await ensureCorrectMemoryTool(agentId, modelForLoading, useMemoryPatch);
// Remove old Letta tools and add new ones (or just remove if none)
await unlinkToolsFromAgent(agentId);
if (toolsetName !== "none") {
await linkToolsToAgent(agentId);
}
// Ensure base memory tool uses memory_apply_patch instead of legacy memory
try {
const agent = await client.agents.retrieve(agentId, {
include: ["agent.tools"],
});
const currentTools = agent.tools || [];
const mapByName = new Map(currentTools.map((t) => [t.name, t.id]));
// Determine which memory tool we want based on toolset
const desiredMemoryTool =
toolsetName === "default" ? "memory" : "memory_apply_patch";
const otherMemoryTool =
desiredMemoryTool === "memory" ? "memory_apply_patch" : "memory";
// Ensure desired memory tool is attached
let desiredId = mapByName.get(desiredMemoryTool);
if (!desiredId) {
const resp = await client.tools.list({ name: desiredMemoryTool });
desiredId = resp.items[0]?.id;
}
if (!desiredId) {
console.warn(
`Could not find tool id for ${desiredMemoryTool}. Keeping existing memory tool if present.`,
);
}
const otherId = mapByName.get(otherMemoryTool);
// Build new tool_ids: add desired memory tool, remove the other if present
const currentIds = currentTools
.map((t) => t.id)
.filter((id): id is string => typeof id === "string");
const newIds = new Set(currentIds);
// Only swap if we have a valid desired tool id; otherwise keep existing
if (desiredId) {
if (otherId) newIds.delete(otherId);
newIds.add(desiredId);
}
// Update tool_rules: rewrite any rules targeting the other tool to the desired tool
const updatedRules = (agent.tool_rules || []).map((r) =>
r.tool_name === otherMemoryTool
? { ...r, tool_name: desiredMemoryTool }
: r,
);
await client.agents.update(agentId, {
tool_ids: Array.from(newIds),
tool_rules: updatedRules,
});
} catch (err) {
console.warn(
`Warning: Failed to enforce memory_apply_patch base tool: ${err instanceof Error ? err.message : String(err)}`,
);
}
// NOTE: Toolset is not persisted. On resume, we derive from agent's model.
// If we want to persist explicit toolset overrides in the future, add:
// agentToolsets: Record<string, ToolsetName> to Settings (global, since agent IDs are UUIDs)
// and save here: settingsManager.updateSettings({ agentToolsets: { ...current, [agentId]: toolsetName } })
}
/**
* Switches the loaded toolset based on the target model identifier,
* upserts the tools to the server, and relinks them to the agent.
* and ensures the correct memory tool is attached to the agent.
*
* @param modelIdentifier - The model handle/id
* @param agentId - Agent to relink tools to
@@ -250,70 +185,8 @@ export async function switchToolsetForModel(
}
}
// Upsert the new toolset (stored in the tool registry) to server (with hash-based caching)
const client = await getClient();
const serverUrl = getServerUrl();
await upsertToolsIfNeeded(client, serverUrl);
// Remove old Letta tools and add new ones
await unlinkToolsFromAgent(agentId);
await linkToolsToAgent(agentId);
// Ensure base memory tool uses memory_apply_patch instead of legacy memory
try {
const agentWithTools = await client.agents.retrieve(agentId, {
include: ["agent.tools"],
});
const currentTools = agentWithTools.tools || [];
const mapByName = new Map(currentTools.map((t) => [t.name, t.id]));
// Determine which memory tool we want based on provider
const desiredMemoryTool = isOpenAIModel(resolvedModel)
? "memory_apply_patch"
: (await import("./manager")).isGeminiModel(resolvedModel)
? "memory_apply_patch"
: "memory";
const otherMemoryTool =
desiredMemoryTool === "memory" ? "memory_apply_patch" : "memory";
// Ensure desired memory tool attached
let desiredId = mapByName.get(desiredMemoryTool);
if (!desiredId) {
const resp = await client.tools.list({ name: desiredMemoryTool });
desiredId = resp.items[0]?.id;
}
if (!desiredId) {
console.warn(
`Could not find tool id for ${desiredMemoryTool}. Keeping existing memory tool if present.`,
);
}
const otherId = mapByName.get(otherMemoryTool);
const currentIds = currentTools
.map((t) => t.id)
.filter((id): id is string => typeof id === "string");
const newIds = new Set(currentIds);
if (desiredId) {
if (otherId) newIds.delete(otherId);
newIds.add(desiredId);
}
const updatedRules = (agentWithTools.tool_rules || []).map((r) =>
r.tool_name === otherMemoryTool
? { ...r, tool_name: desiredMemoryTool }
: r,
);
await client.agents.update(agentId, {
tool_ids: Array.from(newIds),
tool_rules: updatedRules,
});
} catch (err) {
console.warn(
`Warning: Failed to enforce memory_apply_patch base tool: ${err instanceof Error ? err.message : String(err)}`,
);
}
// Ensure base memory tool is correct for the model
await ensureCorrectMemoryTool(agentId, resolvedModel);
const { isGeminiModel } = await import("./manager");
const toolsetName = isOpenAIModel(resolvedModel)
@@ -321,5 +194,7 @@ export async function switchToolsetForModel(
: isGeminiModel(resolvedModel)
? "gemini"
: "default";
// NOTE: Toolset is derived from model, not persisted. See comment in forceToolsetSwitch.
return toolsetName;
}