diff --git a/fern/changelog/2025-03-02.mdx b/fern/changelog/2025-03-02.mdx
deleted file mode 100644
index 5e6e01ab..00000000
--- a/fern/changelog/2025-03-02.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
-## Added List Run Steps API
-
-We've introduced a new API endpoint that allows you to list all steps associated with a specific run. This feature makes it easier to track and analyze the sequence of steps performed during a run.
-
-
-```python title="python"
-from letta_client import Letta
-client = Letta(
- token="YOUR_API_KEY",
-)
-steps = client.runs.list_run_steps(
- run_id="RUN_ID",
-)
-for step in steps:
- print(f"Step ID: {step.id}, Tokens: {step.total_tokens}")
-```
-```typescript title="node.js"
-import { LettaClient } from '@letta-ai/letta-client';
-const client = new LettaClient({
- token: "YOUR_API_KEY",
-});
-const steps = await client.runs.steps.list({
- run_id: "RUN_ID",
-});
-steps.forEach(step => {
- console.log(`Step ID: ${step.id}, Tokens: ${step.total_tokens}`);
-});
-```
-
diff --git a/fern/pages/agents/long_running.mdx b/fern/pages/agents/long_running.mdx
deleted file mode 100644
index 0b67207f..00000000
--- a/fern/pages/agents/long_running.mdx
+++ /dev/null
@@ -1,602 +0,0 @@
----
-title: Long-Running Executions
-slug: guides/agents/long-running
-subtitle: How to handle long-running agent executions
----
-
-When agents need to execute multiple tool calls or perform complex operations (like deep research, data analysis, or multi-step workflows), processing time can vary significantly.
-
-Letta supports various ways to handle long-running agents, so you can choose the approach that best fits your use case:
-
-| Use Case | Duration | Recommendation | Key Benefits |
-|----------|----------|---------------------|-------------|
-| Few-step invocations | < 1 minute | [Standard streaming](/guides/agents/streaming) | Simplest approach |
-| Variable length runs | 1-10 minutes | **Background mode** (Keepalive + Timeout as a second choice) | Easy way to reduce timeouts |
-| Deep research | 10+ minutes | **Background mode**, or async polling | Survives disconnects, resumable streams |
-| Batch jobs | Any | **Async polling** | Fire-and-forget, check results later |
-
-## Option 1: Background Mode with Resumable Streaming
-
-
-**Best for:** Operations exceeding 10 minutes, unreliable network connections, or critical workflows that must complete regardless of client connectivity.
-
-**Trade-off:** Slightly higher latency to first token due to background task initialization.
-
-
-Background mode decouples agent execution from your client connection. The agent processes your request on the server while streaming results to a persistent store, allowing you to reconnect and resume from any point — even if your application crashes or network fails.
-
-
-```curl curl maxLines=50
-curl --request POST \
- --url https://api.letta.com/v1/agents/$AGENT_ID/messages/stream \
- --header 'Authorization: Bearer $LETTA_API_KEY' \
- --header 'Content-Type: application/json' \
- --data '{
- "messages": [
- {
- "role": "user",
- "content": "Run comprehensive analysis on this dataset"
- }
- ],
- "stream_tokens": true,
- "background": true
-}'
-
-# Response stream includes run_id and seq_id for each chunk:
-data: {"run_id":"run-123","seq_id":0,"message_type":"reasoning_message","reasoning":"Analyzing"}
-data: {"run_id":"run-123","seq_id":1,"message_type":"reasoning_message","reasoning":" the dataset"}
-data: {"run_id":"run-123","seq_id":2,"message_type":"tool_call","tool_call":{...}}
-# ... stream continues
-
-# Step 2: If disconnected, resume from last received seq_id
-curl --request GET \
- --url https://api.letta.com/v1/runs/$RUN_ID/stream \
- --header 'Accept: text/event-stream' \
- --data '{
- "starting_after": 57
-}'
-```
-```python python maxLines=50
-stream = client.agents.messages.create_stream(
- agent_id=agent_state.id,
- messages=[
- {
- "role": "user",
- "content": "Run comprehensive analysis on this dataset"
- }
- ],
- stream_tokens=True,
- background=True,
-)
-run_id = None
-last_seq_id = None
-for chunk in stream:
- if hasattr(chunk, "run_id") and hasattr(chunk, "seq_id"):
- run_id = chunk.run_id # Save this to reconnect if your connection drops
- last_seq_id = chunk.seq_id # Save this as your resumption point for cursor-based pagination
- print(chunk)
-
-# If disconnected, resume from last received seq_id:
-for chunk in client.runs.stream(run_id, starting_after=last_seq_id):
- print(chunk)
-```
-```typescript TypeScript maxLines=50
-const stream = await client.agents.messages.createStream({
- agentId: agentState.id,
- requestBody: {
- messages: [
- {
- role: "user",
- content: "Run comprehensive analysis on this dataset"
- }
- ],
- streamTokens: true,
- background: true,
- }
-});
-
-let runId = null;
-let lastSeqId = null;
-for await (const chunk of stream) {
- if (chunk.run_id && chunk.seq_id) {
- runId = chunk.run_id; // Save this to reconnect if your connection drops
- lastSeqId = chunk.seq_id; // Save this as your resumption point for cursor-based pagination
- }
- console.log(chunk);
-}
-
-// If disconnected, resume from last received seq_id
-for await (const chunk of client.runs.stream(runId, {startingAfter: lastSeqId})) {
- console.log(chunk);
-}
-```
-```python python maxLines=60
-# 1) Start background stream and capture approval request
-stream = client.agents.messages.create_stream(
- agent_id=agent.id,
- messages=[{"role": "user", "content": "Do a sensitive operation"}],
- stream_tokens=True,
- background=True,
-)
-
-approval_request_id = None
-orig_run_id = None
-last_seq_id = 0
-for chunk in stream:
- if hasattr(chunk, "run_id") and hasattr(chunk, "seq_id"):
- orig_run_id = chunk.run_id
- last_seq_id = chunk.seq_id
- if getattr(chunk, "message_type", None) == "approval_request_message":
- approval_request_id = chunk.id
- break
-
-# 2) Approve in background; capture the approval stream cursor (this creates a new run)
-approve = client.agents.messages.create_stream(
- agent_id=agent.id,
- messages=[{"type": "approval", "approve": True, "approval_request_id": approval_request_id}],
- stream_tokens=True,
- background=True,
-)
-
-run_id = None
-approve_seq = 0
-for chunk in approve:
- if hasattr(chunk, "run_id") and hasattr(chunk, "seq_id"):
- run_id = chunk.run_id
- approve_seq = chunk.seq_id
- if getattr(chunk, "message_type", None) == "tool_return_message":
- # Tool result arrives here on the approval stream
- break
-
-# 3) Resume that run to read follow-up tokens
-for chunk in client.runs.stream(run_id, starting_after=approve_seq):
- print(chunk)
-```
-```typescript TypeScript maxLines=60
-// 1) Start background stream and capture approval request
-const stream = await client.agents.messages.createStream(
- agent.id, {
- messages: [{role: "user", content: "Do a sensitive operation"}],
- streamTokens: true,
- background: true,
- }
-);
-
-let approvalRequestId = null;
-let origRunId = null;
-let lastSeqId = 0;
-for await (const chunk of stream) {
- if (chunk.runId && chunk.seqId) {
- origRunId = chunk.runId;
- lastSeqId = chunk.seqId;
- }
- if (chunk.messageType === "approval_request_message") {
- approvalRequestId = chunk.id;
- break;
- }
-}
-
-// 2) Approve in background; capture the approval stream cursor (this creates a new run)
-const approveStream = await client.agents.messages.createStream(
- agent.id, {
- messages: [{type: "approval", approve: true, approvalRequestId}],
- streamTokens: true,
- background: true,
- }
-);
-
-let runId = null;
-let approveSeq = 0;
-for await (const chunk of approveStream) {
- if (chunk.runId && chunk.seqId) {
- runId = chunk.runId;
- approveSeq = chunk.seqId;
- }
- if (chunk.messageType === "tool_return_message") {
- // Tool result arrives here on the approval stream
- break;
- }
-}
-
-// 3) Resume that run to read follow-up tokens
-for await (const chunk of client.runs.stream(runId, {startingAfter: approveSeq})) {
- console.log(chunk);
-}
-```
-
-
-### HITL in Background Mode
-
-When [Human‑in‑the‑Loop (HITL) approval](/guides/agents/human-in-the-loop) is enabled for a tool, your background stream may pause and emit an `approval_request_message`. In background mode, send the approval via a separate background stream and capture that stream’s `run_id`/`seq_id`.
-
-
-Approval responses in background mode emit the `tool_return_message` on the approval stream itself (with a new `run_id`, different from the original stream). Save the approval stream cursor, then resume with `runs.stream` to consume subsequent reasoning/assistant messages.
-
-
-
-```curl curl maxLines=70
-# 1) Start background stream; capture approval request
-curl --request POST \
- --url https://api.letta.com/v1/agents/$AGENT_ID/messages/stream \
- --header 'Authorization: Bearer $LETTA_API_KEY' \
- --header 'Content-Type: application/json' \
- --data '{
- "messages": [{"role": "user", "content": "Do a sensitive operation"}],
- "stream_tokens": true,
- "background": true
-}'
-
-# Example stream output (approval request arrives):
-data: {"run_id":"run-abc","seq_id":0,"message_type":"reasoning_message","reasoning":"..."}
-data: {"run_id":"run-abc","seq_id":1,"message_type":"approval_request_message","id":"message-abc","tool_call":{"name":"sensitive_operation","arguments":"{...}","tool_call_id":"tool-xyz"}}
-
-# 2) Approve in background; capture approval stream cursor (this creates a new run)
-curl --request POST \
- --url https://api.letta.com/v1/agents/$AGENT_ID/messages/stream \
- --header 'Authorization: Bearer $LETTA_API_KEY' \
- --header 'Content-Type: application/json' \
- --data '{
- "messages": [{"type": "approval", "approve": true, "approval_request_id": "message-abc"}],
- "stream_tokens": true,
- "background": true
-}'
-
-# Example approval stream output (tool result arrives here):
-data: {"run_id":"run-new","seq_id":0,"message_type":"tool_return_message","status":"success","tool_return":"..."}
-
-# 3) Resume the approval stream's run to continue
-curl --request GET \
- --url https://api.letta.com/v1/runs/$RUN_ID/stream \
- --header 'Accept: text/event-stream' \
- --data '{
- "starting_after": 0
-}'
-```
-```python python maxLines=70
-# 1) Start background stream and capture approval request
-stream = client.agents.messages.create_stream(
- agent_id=agent.id,
- messages=[{"role": "user", "content": "Do a sensitive operation"}],
- stream_tokens=True,
- background=True,
-)
-
-approval_request_id = None
-orig_run_id = None
-last_seq_id = 0
-for chunk in stream:
- if hasattr(chunk, "run_id") and hasattr(chunk, "seq_id"):
- orig_run_id = chunk.run_id
- last_seq_id = chunk.seq_id
- if getattr(chunk, "message_type", None) == "approval_request_message":
- approval_request_id = chunk.id
- break
-
-# 2) Approve in background; capture the approval stream cursor (this creates a new run)
-approve = client.agents.messages.create_stream(
- agent_id=agent.id,
- messages=[{"type": "approval", "approve": True, "approval_request_id": approval_request_id}],
- stream_tokens=True,
- background=True,
-)
-
-run_id = None
-approve_seq = 0
-for chunk in approve:
- if hasattr(chunk, "run_id") and hasattr(chunk, "seq_id"):
- run_id = chunk.run_id
- approve_seq = chunk.seq_id
- if getattr(chunk, "message_type", None) == "tool_return_message":
- # Tool result arrives here on the approval stream
- break
-
-# 3) Resume that run to read follow-up tokens
-for chunk in client.runs.stream(run_id, starting_after=approve_seq):
- print(chunk)
-```
-```typescript TypeScript maxLines=70
-// 1) Start background stream and capture approval request
-const stream = await client.agents.messages.createStream({
- agentId: agent.id,
- requestBody: {
- messages: [{ role: "user", content: "Do a sensitive operation" }],
- streamTokens: true,
- background: true,
- }
-});
-
-let approvalRequestId: string | null = null;
-let origRunId: string | null = null;
-let lastSeqId = 0;
-for await (const chunk of stream) {
- if (chunk.run_id && chunk.seq_id) { origRunId = chunk.run_id; lastSeqId = chunk.seq_id; }
- if (chunk.message_type === "approval_request_message") {
- approvalRequestId = chunk.id; break;
- }
-}
-
-// 2) Approve in background; capture the approval stream cursor (this creates a new run)
-const approve = await client.agents.messages.createStream({
- agentId: agent.id,
- requestBody: {
- messages: [{ type: "approval", approve: true, approvalRequestId }],
- streamTokens: true,
- background: true,
- }
-});
-
-let runId: string | null = null;
-let approveSeq = 0;
-for await (const chunk of approve) {
- if (chunk.run_id && chunk.seq_id) { runId = chunk.run_id; approveSeq = chunk.seq_id; }
- if (chunk.message_type === "tool_return_message") {
- // Tool result arrives here on the approval stream
- break;
- }
-}
-
-// 3) Resume that run to read follow-up tokens
-const resume = await client.runs.stream(runId!, { startingAfter: approveSeq });
-for await (const chunk of resume) {
- console.log(chunk);
-}
-```
-
-
-
-### Discovering and Resuming Active Streams
-
-When your application starts or recovers from a crash, you can check for any active background streams and resume them. This is particularly useful for:
-- **Application restarts**: Resume processing after deployments or crashes
-- **Load balancing**: Pick up streams started by other instances
-- **Monitoring**: Check progress of long-running operations from different clients
-
-
-```curl curl maxLines=50
-# Step 1: Find active background streams for your agents
-curl --request GET \
- --url https://api.letta.com/v1/runs/active \
- --header 'Authorization: Bearer $LETTA_API_KEY' \
- --header 'Content-Type: application/json' \
- --data '{
- "agent_ids": [
- "agent-123",
- "agent-456"
- ],
- "background": true
-}'
-# Returns: [{"run_id": "run-abc", "agent_id": "agent-123", "status": "processing", ...}]
-
-# Step 2: Resume streaming from the beginning (or any specified seq_id)
-curl --request GET \
- --url https://api.letta.com/v1/runs/$RUN_ID/stream \
- --header 'Accept: text/event-stream' \
- --data '{
- "starting_after": 0, # Start from beginning
- "batch_size": 1000 # Fetch historical chunks in larger batches
-}'
-```
-```python python maxLines=50
-# Find and resume active background streams
-active_runs = client.runs.active(
- agent_ids=["agent-123", "agent-456"],
- background=True,
-)
-
-if active_runs:
- # Resume the first active stream from the beginning
- run = active_runs[0]
- print(f"Resuming stream for run {run.id}, status: {run.status}")
-
- stream = client.runs.stream(
- run_id=run.id,
- starting_after=0, # Start from beginning
- batch_size=1000 # Fetch historical chunks in larger batches
- )
-
- # Each historical chunk is streamed one at a time, followed by new chunks as they become available
- for chunk in stream:
- print(chunk)
-```
-```typescript TypeScript maxLines=50
-// Find and resume active background streams
-const activeRuns = await client.runs.active({
- agentIds: ["agent-123", "agent-456"],
- background: true,
-});
-
-if (activeRuns.length > 0) {
- // Resume the first active stream from the beginning
- const run = activeRuns[0];
- console.log(`Resuming stream for run ${run.id}, status: ${run.status}`);
-
- const stream = await client.runs.stream(run.id, {
- startingAfter: 0, // Start from beginning
- batchSize: 1000 // Fetch historical chunks in larger batches
- });
-
- // Each historical chunk is streamed one at a time, followed by new chunks as they become available
- for await (const chunk of stream) {
- console.log(chunk);
- }
-}
-```
-
-
-## Option 2: Async Operations with Polling
-
-
-**Best for:** Use cases where you don't need real-time token streaming.
-
-
-Ideal for batch processing, scheduled jobs, or when you don't need real-time updates. The [async SDK method](/api-reference/agents/messages/create-async) queues your request and returns immediately, letting you check results later:
-
-
-```curl curl maxLines=50
-# Start async operation (returns immediately with run ID)
-curl --request POST \
- --url https://api.letta.com/v1/agents/$AGENT_ID/messages/async \
- --header 'Authorization: Bearer $LETTA_API_KEY' \
- --header 'Content-Type: application/json' \
- --data '{
- "messages": [
- {
- "role": "user",
- "content": "Run comprehensive analysis on this dataset"
- }
- ]
-}'
-
-# Poll for results using the returned run ID
-curl --request GET \
- --url https://api.letta.com/v1/runs/$RUN_ID
-```
-```python python maxLines=50
-# Start async operation (returns immediately with run ID)
-run = client.agents.messages.create_async(
- agent_id=agent_state.id,
- messages=[
- {
- "role": "user",
- "content": "Run comprehensive analysis on this dataset"
- }
- ],
-)
-
-# Poll for completion
-import time
-while run.status != "completed":
- time.sleep(2)
- run = client.runs.retrieve(run_id=run.id)
-
-# Get the messages once complete
-messages = client.runs.messages.list(run_id=run.id)
-```
-```typescript TypeScript maxLines=50
-// Start async operation (returns immediately with run ID)
-const run = await client.agents.createAgentMessageAsync({
- agentId: agentState.id,
- requestBody: {
- messages: [
- {
- role: "user",
- content: "Run comprehensive analysis on this dataset"
- }
- ]
- }
-});
-
-// Poll for completion
-while (run.status !== "completed") {
- await new Promise(resolve => setTimeout(resolve, 2000));
- run = await client.runs.retrieveRun({ runId: run.id });
-}
-
-// Get the messages once complete
-const messages = await client.runs.messages.list({ runId: run.id });
-```
-
-
-## Option 3: Configure Streaming with Keepalive Pings and Longer Timeouts
-
-
-**Best for:** Use cases where you are already using the standard [streaming code](/guides/agents/streaming), but are experiencing issues with timeouts or disconnects (e.g. due to network interruptions or hanging tool executions).
-
-**Trade-off:** Not as reliable as background mode, and does not support resuming a disconnected stream/request.
-
-
-
-This approach assumes a persistent HTTP connection. We highly recommend using **background mode** (or async polling) for long-running jobs, especially when:
-- Your infrastructure uses aggressive proxy timeouts
-- You need to handle network interruptions gracefully
-- Operations might exceed 10 minutes
-
-
-For operations under 10 minutes that need real-time updates without the complexity of background processing. Configure keepalive pings and timeouts to maintain stable connections:
-
-
-```curl curl maxLines=50
-curl --request POST \
- --url https://api.letta.com/v1/agents/$AGENT_ID/messages/stream \
- --header 'Authorization: Bearer $LETTA_API_KEY' \
- --header 'Content-Type: application/json' \
- --data '{
- "messages": [
- {
- "role": "user",
- "content": "Execute this long-running analysis"
- }
- ],
- "include_pings": true
-}'
-```
-```python python
-# Configure client with extended timeout
-from letta_client import Letta
-import os
-
-client = Letta(
- token=os.getenv("LETTA_API_KEY")
-)
-
-# Enable pings to prevent timeout during long operations
-stream = client.agents.messages.create_stream(
- agent_id=agent_state.id,
- messages=[
- {
- "role": "user",
- "content": "Execute this long-running analysis"
- }
- ],
- include_pings=True, # Sends periodic keepalive messages
- request_options={"timeout_in_seconds": 600} # 10 min timeout
-)
-
-# Process the stream (pings will keep connection alive)
-for chunk in stream:
- if chunk.message_type == "ping":
- # Keepalive ping received, connection is still active
- continue
- print(chunk)
-```
-```typescript TypeScript maxLines=50
-// Configure client with extended timeout
-import { Letta } from '@letta/sdk';
-
-const client = new Letta({
- token: process.env.LETTA_API_KEY
-});
-
-// Enable pings to prevent timeout during long operations
-const stream = await client.agents.createAgentMessageStream({
- agentId: agentState.id,
- requestBody: {
- messages: [
- {
- role: "user",
- content: "Execute this long-running analysis"
- }
- ],
- includePings: true // Sends periodic keepalive messages
- }, {
- timeoutInSeconds: 600 // 10 minutes timeout in seconds
- }
-});
-
-// Process the stream (pings will keep connection alive)
-for await (const chunk of stream) {
- if (chunk.message_type === "ping") {
- // Keepalive ping received, connection is still active
- continue;
- }
- console.log(chunk);
-}
-```
-
-
-### Configuration Guidelines
-
-| Parameter | Purpose | When to Use |
-|-----------|---------|------------|
-| Timeout in seconds | Extends request timeout beyond 60s default | Set to 1.5x your expected max duration |
-| Include pings | Sends keepalive messages every ~30s | Enable for operations with long gaps between outputs |
diff --git a/fern/pages/api/sdk_migration_guide.mdx b/fern/pages/api/sdk_migration_guide.mdx
deleted file mode 100644
index 4d4ea418..00000000
--- a/fern/pages/api/sdk_migration_guide.mdx
+++ /dev/null
@@ -1,1519 +0,0 @@
----
-title: SDK v1.0 Migration Guide
-subtitle: Upgrading from v0.x to v1.0
-slug: api-reference/sdk-migration-guide
----
-
-
-This guide covers migrating from Letta SDK v0.x (e.g., `1.0.0-alpha.2`) to v1.0 (e.g., `1.0.0-alpha.10`+). For agent architecture migrations, see the [Architecture Migration Guide](/guides/legacy/migration_guide).
-
-
-
-**Letta Cloud Only (for now)**
-
-This SDK v1.0 migration guide applies **only to Letta Cloud**.
-
-The current self-hosted Letta release (v0.13.x) does **not** support the v1.0 SDK. If you are self-hosting Letta, continue using SDK v0.x for now.
-
-**Coming soon:** We will be releasing a new open-source version of Letta that includes SDK v1.0 support for self-hosted deployments.
-
-To use the v1.0 SDK today, you must connect to Letta Cloud at `https://api.letta.com`.
-
-
-## Overview
-
-SDK v1.0 introduces breaking changes to improve consistency and align with modern API design patterns:
-
-- **Naming convention**: All properties now use `snake_case` instead of `camelCase`
-- **Client initialization**: Simplified client constructor with renamed parameters
-- **Method names**: Several methods renamed for clarity
-- **Type imports**: Types moved to subpath exports for better organization
-- **Enums**: Replaced with string literal types
-- **Tool calls**: Changed from single object to array structure
-- **Pagination**: List methods now return page objects
-
-## Quick Reference
-
-### Package Update
-
-Update your package dependency:
-
-
-```json package.json
-{
- "dependencies": {
-- "@letta-ai/letta-client": "1.0.0-alpha.2"
-+ "@letta-ai/letta-client": "1.0.0-alpha.10"
- }
-}
-```
-```toml pyproject.toml
-[tool.poetry.dependencies]
--letta-client = "1.0.0a2"
-+letta-client = "1.0.0a10"
-```
-
-
-### Import Changes
-
-
-```typescript TypeScript
-// Old
-- import { LettaClient, Letta } from "@letta-ai/letta-client";
-
-// New
-+ import Letta from "@letta-ai/letta-client";
-+ import type {
-+ Block,
-+ CreateBlock,
-+ AgentType
-+ } from "@letta-ai/letta-client/resources/agents/agents";
-+ import type {
-+ LettaMessageUnion,
-+ ApprovalCreate
-+ } from "@letta-ai/letta-client/resources/agents/messages";
-+ import type {
-+ LlmConfig
-+ } from "@letta-ai/letta-client/resources/models/models";
-```
-```python Python
-# Old
-- from letta_client import Letta, LettaClient
-
-# New
-+ from letta import Letta
-+ from letta.schemas.agent import Block, CreateBlock, AgentType
-+ from letta.schemas.message import LettaMessageUnion, ApprovalCreate
-+ from letta.schemas.llm_config import LlmConfig
-```
-
-
-### Client Instantiation
-
-
-```typescript TypeScript
-// Old
-- const client = new LettaClient({
-- token: process.env.LETTA_API_KEY,
-- baseUrl: "https://api.letta.com"
-- });
-
-// New
-+ const client = new Letta({
-+ apiKey: process.env.LETTA_API_KEY,
-+ baseURL: "https://api.letta.com"
-+ });
-```
-```python Python
-# Old
-- client = LettaClient(
-- token=os.environ["LETTA_API_KEY"],
-- base_url="https://api.letta.com"
-- )
-
-# New
-+ client = Letta(
-+ api_key=os.environ["LETTA_API_KEY"],
-+ base_url="https://api.letta.com"
-+ )
-```
-
-
-## Breaking Changes by Category
-
-### 1. Pagination
-
-All list endpoints now use cursor-based pagination with consistent parameters:
-
-
-```typescript TypeScript
-// Old - various pagination styles
-const messages = await client.agents.messages.list(agentId, {
- sort_by: "created_at",
- ascending: true
-});
-
-// New - standardized cursor pagination
-const messagesPage = await client.agents.messages.list(agentId, {
- before: "msg_123", // cursor (message ID)
- after: "msg_456", // cursor (message ID)
- limit: 50,
- order: "asc" // or "desc"
-});
-const messages = messagesPage.items;
-```
-```python Python
-# Old
-messages = client.agents.messages.list(
- agent_id=agent_id,
- sort_by="created_at",
- ascending=True
-)
-
-# New
-messages_page = client.agents.messages.list(
- agent_id=agent_id,
- before="msg_123",
- after="msg_456",
- limit=50,
- order="asc"
-)
-messages = messages_page.items
-```
-
-
-**Affected endpoints:**
-- `agents.list()` - renamed `sort_by` → `order_by`, `ascending` → `order`
-- `agents.messages.list()`
-- `agents.tools.list()`
-- `agents.blocks.list()`
-- `agents.files.list()`
-- `agents.folders.list()`
-- `agents.groups.list()`
-- `blocks.list()`
-- `folders.list()`
-- `folders.files.list()`
-- `folders.passages.list()`
-- `folders.agents.list()`
-- `groups.list()`
-- `groups.messages.list()`
-- `identities.list()`
-- `providers.list()`
-- `runs.list()`
-- `runs.messages.list()`
-- `runs.steps.list()`
-- `jobs.list()`
-- `steps.list()`
-- `tags.list()`
-- `tools.list()`
-- `batches.list()`
-- `batches.messages.list()`
-
-### 2. Method Renames and Endpoint Restructuring
-
-Many methods were reorganized for better SDK structure:
-
-
-```typescript TypeScript
-// Agent updates
-- await client.agents.modify(agentId, updates)
-+ await client.agents.update(agentId, updates)
-
-// Message operations
-- await client.agents.summarize_agent_conversation(agentId)
-+ await client.agents.messages.compact(agentId)
-
-- await client.agents.cancel_agent_run(agentId)
-+ await client.agents.messages.cancel(agentId)
-
-- await client.agents.messages.preview_raw_payload(agentId, messages)
-+ await client.agents.messages.preview(agentId, messages)
-
-// Agent file operations
-- await client.agents.list_agent_files(agentId)
-+ await client.agents.files.list(agentId)
-
-// Export/Import
-- await client.agents.export_agent_serialized(agentId)
-+ await client.agents.exportFile(agentId)
-
-- await client.agents.import_agent_serialized(file)
-+ await client.agents.importFile(file)
-
-// Folder operations
-- await client.folders.get_agents_for_folder(folderId)
-+ await client.folders.agents.list(folderId)
-
-- await client.folders.retrieve_folder_metadata(folderId)
-+ await client.folders.retrieve_metadata(folderId)
-
-// Provider operations
-- await client.providers.check_provider(providerId)
-+ await client.providers.check(providerId)
-
-// Telemetry
-- await client.telemetry.retrieve_provider_trace(stepId)
-+ await client.steps.trace(stepId)
-
-// Step metrics
-- await client.steps.retrieve_step_metrics(stepId)
-+ await client.steps.metrics.retrieve(stepId)
-
-// Batch messages
-- await client.messages.list_batch_messages(batchId)
-+ await client.batches.messages.list(batchId)
-
-// Multi-agent groups
-- agent.multi_agent_group
-+ agent.managed_group
-```
-```python Python
-# Agent updates
-- client.agents.modify(agent_id, **updates)
-+ client.agents.update(agent_id, **updates)
-
-# Message operations
-- client.agents.summarize_agent_conversation(agent_id)
-+ client.agents.messages.compact(agent_id)
-
-- client.agents.cancel_agent_run(agent_id)
-+ client.agents.messages.cancel(agent_id)
-
-# Export/Import
-- client.agents.export_agent_serialized(agent_id)
-+ client.agents.export_file(agent_id)
-
-- client.agents.import_agent_serialized(file)
-+ client.agents.import_file(file)
-
-# Folder operations
-- client.folders.get_agents_for_folder(folder_id)
-+ client.folders.agents.list(folder_id)
-
-# Provider operations
-- client.providers.check_provider(provider_id)
-+ client.providers.check(provider_id)
-```
-
-
-### 3. Deprecations
-
-Several endpoints and fields are now deprecated:
-
-**Deprecated endpoints:**
-- `client.agents.search()` - use `client.agents.list()` with filters
-- `client.messages.search()` - use `client.agents.messages.list()` with filters
-- `client.runs.list_active()` - use `client.runs.list(active=True)`
-- `client.jobs.list_active()` - use `client.jobs.list(active=True)`
-- `client.folders.get_by_name()` - use `client.folders.list(name="...")`
-- **MCP routes under `/tools/mcp/servers`** - replaced with new `/mcp-servers` endpoints
- - All old MCP methods moved from `client.tools.mcp.servers` to `client.mcp_servers`
- - Now use server IDs and tool IDs instead of names
-- Sources-related routes - replaced with folders
-- Passages routes - replaced with archives
-- Legacy agent architecture routes
-- All `/count` endpoints
-
-**Deprecated fields:**
-- `agent.memory` - use `agent.blocks`
-- `step.messages` - use `client.steps.messages.list(step_id)`
-- `agent.identity_ids` - replaced with `agent.identities` (full objects)
-- `agent.multi_agent_group` - renamed to `agent.managed_group`
-- `use_assistant_message` parameter - no longer needed
-- `tool_exec_environment_variables` - renamed to `secrets`
-
-**Deprecated on agent/block objects:**
-- Template-related fields: `is_template`, `base_template_id`, `deployment_id`
-- `entity_id`, `preserve_on_migration`, `hidden`
-- `name` on blocks (use `label`)
-
-### 4. Property Names (camelCase → snake_case)
-
-All API properties now use `snake_case`:
-
-
-```typescript TypeScript
-// Agent properties
-- agent.llmConfig
-+ agent.llm_config
-
-- agent.contextWindowLimit
-+ agent.context_window_limit
-
-- agent.blockIds
-+ agent.block_ids
-
-- agent.includeBaseTools
-+ agent.include_base_tools
-
-- agent.includeBaseToolRules
-+ agent.include_base_tool_rules
-
-- agent.initialMessageSequence
-+ agent.initial_message_sequence
-
-// Message properties
-- message.messageType
-+ message.message_type
-
-- message.toolCallId
-+ message.tool_call_id
-
-- message.toolReturn
-+ message.tool_return
-
-- message.toolCall
-+ message.tool_calls // Also changed to array!
-
-// API parameters
-- streamTokens: true
-+ stream_tokens: true
-
-- approvalRequestId: id
-+ approval_request_id: id
-```
-```python Python
-# Agent properties
-- agent.llm_config # Already snake_case
-+ agent.llm_config # No change needed
-
-- agent.context_window_limit
-+ agent.context_window_limit # No change needed
-
-# Python SDK was already using snake_case
-# Most changes affect TypeScript/JavaScript only
-```
-
-
-### 2. Agent Type Specification
-
-
-```typescript TypeScript
-// Old
-- agentType: Letta.AgentType.LettaV1Agent
-
-// New
-+ agent_type: "letta_v1_agent" as AgentType
-```
-```python Python
-# Old
-- agent_type=AgentType.LETTA_V1_AGENT
-
-# New
-+ agent_type="letta_v1_agent"
-```
-
-
-### 3. Method Renames
-
-
-```typescript TypeScript
-// Agent updates
-- await client.agents.modify(agentId, { model, llmConfig })
-+ await client.agents.update(agentId, { model, llm_config })
-
-// Message streaming
-- client.agents.messages.createStream(agentId, { messages, streamTokens })
-+ client.agents.messages.stream(agentId, { messages, stream_tokens })
-```
-```python Python
-# Agent updates
-- client.agents.modify(agent_id, model=model, llm_config=config)
-+ client.agents.update(agent_id, model=model, llm_config=config)
-
-# Message streaming
-- client.agents.messages.create_stream(agent_id, messages=messages)
-+ client.agents.messages.stream(agent_id, messages=messages)
-```
-
-
-### 4. Message Roles and Stop Reasons
-
-Enums replaced with string literals:
-
-
-```typescript TypeScript
-// Message roles
-- role: Letta.MessageCreateRole.User
-+ role: "user"
-
-// Stop reasons
-- if (stopReason === Letta.StopReasonType.EndTurn)
-+ if (stopReason === "end_turn")
-
-- if (stopReason === Letta.StopReasonType.RequiresApproval)
-+ if (stopReason === "requires_approval")
-```
-```python Python
-# Message roles
-- role=MessageRole.USER
-+ role="user"
-
-# Stop reasons
-- if stop_reason == StopReasonType.END_TURN:
-+ if stop_reason == "end_turn":
-
-- if stop_reason == StopReasonType.REQUIRES_APPROVAL:
-+ if stop_reason == "requires_approval":
-```
-
-
-### 5. Tool Calls Structure
-
-Tool calls changed from single object to array:
-
-
-```typescript TypeScript
-// Old - single tool_call
-- if (message.messageType === "approval_request_message") {
-- const toolCall = message.toolCall;
-- const id = toolCall.toolCallId;
-- const name = toolCall.name;
-- }
-
-// New - tool_calls array
-+ if (message.message_type === "approval_request_message") {
-+ const toolCalls = message.tool_calls || [];
-+ if (toolCalls.length > 0) {
-+ const toolCall = toolCalls[0];
-+ const id = toolCall.tool_call_id;
-+ const name = toolCall.name;
-+ }
-+ }
-```
-```python Python
-# Old - single tool_call
-- if message.message_type == "approval_request_message":
-- tool_call = message.tool_call
-- id = tool_call.tool_call_id
-- name = tool_call.name
-
-# New - tool_calls array
-+ if message.message_type == "approval_request_message":
-+ tool_calls = message.tool_calls or []
-+ if len(tool_calls) > 0:
-+ tool_call = tool_calls[0]
-+ id = tool_call.tool_call_id
-+ name = tool_call.name
-```
-
-
-### 6. Pagination
-
-List methods now return page objects:
-
-
-```typescript TypeScript
-// Old
-- const messages = await client.agents.messages.list(agentId);
-
-// New
-+ const messagesPage = await client.agents.messages.list(agentId);
-+ const messages = messagesPage.items;
-```
-```python Python
-# Old
-- messages = client.agents.messages.list(agent_id=agent_id)
-
-# New
-+ messages_page = client.agents.messages.list(agent_id=agent_id)
-+ messages = messages_page.items
-```
-
-
-### 7. Date Handling
-
-
-```typescript TypeScript
-// Old
-- date: new Date()
-
-// New
-+ date: new Date().toISOString()
-```
-```python Python
-# Python handles this automatically
-from datetime import datetime
-date = datetime.now() # Works in both versions
-```
-
-
-### 8. Archive Management (New APIs)
-
-New endpoints for managing archival memory:
-
-
-```typescript TypeScript
-// Create archive
-const archive = await client.archives.create({
- name: "my-archive",
- description: "Project knowledge base"
-});
-
-// List archives
-const archives = await client.archives.list();
-
-// Get archive by ID
-const archive = await client.archives.retrieve(archiveId);
-
-// Update archive
-await client.archives.update(archiveId, { name: "updated-name" });
-
-// Delete archive
-await client.archives.delete(archiveId);
-
-// Attach archive to agent
-await client.agents.archives.attach(agentId, archiveId);
-
-// Detach archive from agent
-await client.agents.archives.detach(agentId, archiveId);
-
-// List agents using an archive
-const agents = await client.archives.agents.list(archiveId);
-
-// Manage memories in archive
-await client.archives.memories.create(archiveId, { text: "Important fact" });
-await client.archives.memories.update(archiveId, memoryId, { text: "Updated fact" });
-await client.archives.memories.delete(archiveId, memoryId);
-```
-```python Python
-# Create archive
-archive = client.archives.create(
- name="my-archive",
- description="Project knowledge base"
-)
-
-# Attach/detach
-client.agents.archives.attach(agent_id, archive_id)
-client.agents.archives.detach(agent_id, archive_id)
-
-# Manage memories
-client.archives.memories.create(archive_id, text="Important fact")
-```
-
-
-### 9. Identity and Block Management
-
-New attach/detach patterns for identities and blocks:
-
-
-```typescript TypeScript
-// Attach identity to agent
-await client.agents.identities.attach(agentId, identityId);
-
-// Detach identity from agent
-await client.agents.identities.detach(agentId, identityId);
-
-// Attach identity to block
-await client.blocks.identities.attach(blockId, identityId);
-
-// Detach identity from block
-await client.blocks.identities.detach(blockId, identityId);
-
-// Agent now returns full identity objects
-const agent = await client.agents.retrieve(agentId);
-// Old: agent.identity_ids = ["id1", "id2"]
-// New: agent.identities = [{ id: "id1", name: "Alice", ... }, ...]
-```
-```python Python
-# Attach/detach identities
-client.agents.identities.attach(agent_id, identity_id)
-client.agents.identities.detach(agent_id, identity_id)
-
-# Full identity objects
-agent = client.agents.retrieve(agent_id)
-for identity in agent.identities:
- print(identity.name)
-```
-
-
-### 10. Agent Configuration Updates
-
-New parameters available for agent creation and updates:
-
-
-```typescript TypeScript
-// Temperature, top_p, reasoning_effort now available at top level
-const agent = await client.agents.create({
- model: "openai/gpt-4",
- temperature: 0.7,
- top_p: 0.9,
- reasoning_effort: "medium",
- max_tokens: 4096,
- context_window_limit: 128000
-});
-
-// Update agent configuration
-await client.agents.update(agentId, {
- temperature: 0.5,
- context_window_limit: 64000
-});
-```
-```python Python
-# Create with configuration
-agent = client.agents.create(
- model="openai/gpt-4",
- temperature=0.7,
- top_p=0.9,
- reasoning_effort="medium",
- max_tokens=4096,
- context_window_limit=128000
-)
-
-# Update configuration
-client.agents.update(
- agent_id,
- temperature=0.5
-)
-```
-
-
-### 11. Message Input Shorthand
-
-Simplified syntax for sending simple user messages:
-
-
-```typescript TypeScript
-// Old - verbose
-const response = await client.agents.messages.create(agentId, {
- messages: [{
- role: "user",
- content: "Hello!"
- }]
-});
-
-// New - shorthand available
-const response = await client.agents.messages.create(agentId, {
- input: "Hello!" // Automatically creates user message
-});
-
-// Both forms still supported
-```
-```python Python
-# Old
-response = client.agents.messages.create(
- agent_id,
- messages=[{"role": "user", "content": "Hello!"}]
-)
-
-# New shorthand
-response = client.agents.messages.create(
- agent_id,
- input="Hello!"
-)
-```
-
-
-### 12. Attach/Detach Return Values
-
-All attach/detach endpoints now return `None` instead of agent/object state:
-
-
-```typescript TypeScript
-// Old - returned updated agent
-const updatedAgent = await client.agents.tools.attach(agentId, toolId);
-
-// New - returns void/None
-await client.agents.tools.attach(agentId, toolId);
-// Fetch agent separately if needed
-const agent = await client.agents.retrieve(agentId);
-```
-```python Python
-# Old
-updated_agent = client.agents.tools.attach(agent_id, tool_id)
-
-# New
-client.agents.tools.attach(agent_id, tool_id)
-agent = client.agents.retrieve(agent_id)
-```
-
-
-**Affected methods:**
-- `agents.tools.attach/detach`
-- `agents.blocks.attach/detach`
-- `agents.folders.attach/detach`
-- `agents.archives.attach/detach`
-- `agents.identities.attach/detach`
-- `blocks.identities.attach/detach`
-
-### 13. Agent Import/Export Changes
-
-Import endpoint now supports name overriding:
-
-
-```typescript TypeScript
-// Old - append_copy_suffix parameter
-const agent = await client.agents.import(file, {
- append_copy_suffix: true // Deprecated
-});
-
-// New - override_name parameter
-const agent = await client.agents.importFile(file, {
- override_name: "my-imported-agent" // Optional, exact name to use
-});
-```
-```python Python
-# Old
-agent = client.agents.import_agent(
- file,
- append_copy_suffix=True
-)
-
-# New
-agent = client.agents.import_file(
- file,
- override_name="my-imported-agent"
-)
-```
-
-
-### 14. Query Parameter to Request Body Changes
-
-Several endpoints moved from query parameters to request body:
-
-
-```typescript TypeScript
-// Tool approval settings (was query params, now request body)
-await client.agents.tools.update_approval(agentId, toolName, {
- require_approval: true
-});
-
-// Reset messages (was query param, now request body)
-await client.agents.messages.reset(agentId, {
- add_default_initial_messages: false
-});
-
-// Steps feedback (was query params, now request body)
-await client.steps.feedback.create(stepId, {
- rating: "positive",
- tags: ["helpful", "accurate"]
-});
-```
-```python Python
-# Tool approval
-client.agents.tools.update_approval(
- agent_id,
- tool_name,
- require_approval=True
-)
-
-# Reset messages
-client.agents.messages.reset(
- agent_id,
- add_default_initial_messages=False
-)
-```
-
-
-### 15. Tags Endpoint
-
-Tags list endpoint now uses `name` parameter instead of `query_text`:
-
-
-```typescript TypeScript
-// Old
-const tags = await client.tags.list({ query_text: "important" });
-
-// New
-const tags = await client.tags.list({ name: "important" });
-```
-```python Python
-# Old
-tags = client.tags.list(query_text="important")
-
-# New
-tags = client.tags.list(name="important")
-```
-
-
-### 16. Project ID Handling
-
-Project ID is now passed in the client constructor or headers:
-
-
-```typescript TypeScript
-// Pass in constructor
-const client = new Letta({
- apiKey: process.env.LETTA_API_KEY,
- projectId: "proj_123"
-});
-
-// No longer in URL paths
-- await client.templates.agents.create("proj_123", templateVersion, data)
-+ await client.templates.agents.create(templateVersion, data)
-```
-```python Python
-# Pass in constructor
-client = Letta(
- api_key=os.environ["LETTA_API_KEY"],
- project_id="proj_123"
-)
-```
-
-
-### 17. MCP (Model Context Protocol) Server Management
-
-MCP routes have been completely restructured with new endpoints under `/mcp-servers`:
-
-
-```typescript TypeScript
-// OLD ROUTES (under /tools/mcp/servers - DEPRECATED)
-// Using server names and tool names
-
-// List MCP servers
-- const servers = await client.tools.mcp.servers.list();
-
-// Add MCP server
-- await client.tools.mcp.servers.create(serverConfig);
-
-// Update MCP server by name
-- await client.tools.mcp.servers.update(serverName, updateConfig);
-
-// Delete MCP server by name
-- await client.tools.mcp.servers.delete(serverName);
-
-// List tools from a server by name
-- const tools = await client.tools.mcp.servers.tools.list(serverName);
-
-// Add individual tool by name
-- await client.tools.mcp.servers.tools.add(serverName, toolName);
-
-// Resync tools
-- await client.tools.mcp.servers.resync(serverName);
-
-// Execute tool by names
-- await client.tools.mcp.servers.tools.execute(serverName, toolName, { args });
-
-// Connect to server (for OAuth)
-- await client.tools.mcp.servers.connect(serverConfig);
-
-// NEW ROUTES (under /mcp-servers)
-// Using server IDs and tool IDs
-
-// List MCP servers (returns array of server objects with IDs)
-+ const servers = await client.mcp_servers.list();
-
-// Create MCP server (automatically syncs tools)
-+ const server = await client.mcp_servers.create(serverConfig);
-+ // Returns: { id: "mcp_server_123", name: "...", ... }
-
-// Get MCP server by ID
-+ const server = await client.mcp_servers.retrieve(serverId);
-
-// Update MCP server by ID
-+ await client.mcp_servers.update(serverId, updateConfig);
-
-// Delete MCP server by ID
-+ await client.mcp_servers.delete(serverId);
-
-// List tools from a server by ID
-+ const tools = await client.mcp_servers.tools.list(serverId);
-
-// Get specific tool by ID
-+ const tool = await client.mcp_servers.tools.retrieve(serverId, toolId);
-
-// Run/execute tool by ID
-+ const result = await client.mcp_servers.tools.run(serverId, toolId, {
-+ args: { key: "value" }
-+ });
-
-// Refresh tools (replaces resync)
-+ await client.mcp_servers.refresh(serverId);
-
-// Connect to server (for OAuth) - now uses server ID
-+ await client.mcp_servers.connect(serverId);
-```
-```python Python
-# OLD ROUTES (DEPRECATED)
-- servers = client.tools.mcp.servers.list()
-- client.tools.mcp.servers.create(server_config)
-- client.tools.mcp.servers.update(server_name, update_config)
-- client.tools.mcp.servers.delete(server_name)
-- tools = client.tools.mcp.servers.tools.list(server_name)
-- client.tools.mcp.servers.tools.add(server_name, tool_name)
-- client.tools.mcp.servers.resync(server_name)
-- client.tools.mcp.servers.tools.execute(server_name, tool_name, args=args)
-
-# NEW ROUTES
-+ servers = client.mcp_servers.list()
-+ server = client.mcp_servers.create(server_config)
-+ server = client.mcp_servers.retrieve(server_id)
-+ client.mcp_servers.update(server_id, update_config)
-+ client.mcp_servers.delete(server_id)
-+ tools = client.mcp_servers.tools.list(server_id)
-+ tool = client.mcp_servers.tools.retrieve(server_id, tool_id)
-+ result = client.mcp_servers.tools.run(server_id, tool_id, args={"key": "value"})
-+ client.mcp_servers.refresh(server_id)
-+ client.mcp_servers.connect(server_id)
-```
-
-
-**Key Changes:**
-- **Namespace**: Moved from `client.tools.mcp.servers` to `client.mcp_servers`
-- **Identification**: Use server IDs and tool IDs instead of names
- - Old: `serverName` (string) → New: `serverId` (ID string like `"mcp_server_123"`)
- - Old: `toolName` (string) → New: `toolId` (ID string like `"tool_456"`)
-- **Tool Management**: Tools are now automatically synced when creating a server
- - No longer need to manually "add" individual tools
- - Use `refresh()` to resync tools (replaces `resync()`)
-- **Tool Execution**: Method renamed from `execute()` to `run()`
-- **Server Retrieval**: New `retrieve()` method to get individual server by ID
-- **Tool Retrieval**: New `retrieve()` method to get individual tool by ID
-
-**Migration Example:**
-
-
-```typescript TypeScript
-// Before: Using names
-const servers = await client.tools.mcp.servers.list();
-const myServer = servers.find(s => s.name === "my-server");
-const tools = await client.tools.mcp.servers.tools.list("my-server");
-const myTool = tools.find(t => t.name === "my-tool");
-await client.tools.mcp.servers.tools.execute("my-server", "my-tool", {
- args: { query: "hello" }
-});
-
-// After: Using IDs
-const servers = await client.mcp_servers.list();
-const myServer = servers.find(s => s.name === "my-server");
-const serverId = myServer.id; // Get ID from server object
-const tools = await client.mcp_servers.tools.list(serverId);
-const myTool = tools.find(t => t.name === "my-tool");
-const toolId = myTool.id; // Get ID from tool object
-await client.mcp_servers.tools.run(serverId, toolId, {
- args: { query: "hello" }
-});
-```
-```python Python
-# Before
-servers = client.tools.mcp.servers.list()
-my_server = next(s for s in servers if s.name == "my-server")
-tools = client.tools.mcp.servers.tools.list("my-server")
-my_tool = next(t for t in tools if t.name == "my-tool")
-client.tools.mcp.servers.tools.execute(
- "my-server",
- "my-tool",
- args={"query": "hello"}
-)
-
-# After
-servers = client.mcp_servers.list()
-my_server = next(s for s in servers if s.name == "my-server")
-server_id = my_server.id
-tools = client.mcp_servers.tools.list(server_id)
-my_tool = next(t for t in tools if t.name == "my-tool")
-tool_id = my_tool.id
-client.mcp_servers.tools.run(
- server_id,
- tool_id,
- args={"query": "hello"}
-)
-```
-
-
-**Notes:**
-- MCP servers and tools now have persistent IDs in the database
-- Server names are no longer unique identifiers - use IDs instead
-- Tool schemas are automatically kept in sync via the `refresh()` endpoint
-- The old routes under `/tools/mcp/servers` are deprecated and will be removed
-
-## Migration Examples
-
-### Complete Agent Creation
-
-
-```typescript TypeScript
-// Before
-const agent = await client.agents.create({
- agentType: Letta.AgentType.LettaV1Agent,
- model: "openai/gpt-4",
- contextWindowLimit: 200_000,
- blockIds: ["block-1", "block-2"],
- includeBaseTools: false,
- includeBaseToolRules: false,
- initialMessageSequence: [],
-});
-
-// After
-const agent = await client.agents.create({
- agent_type: "letta_v1_agent" as AgentType,
- model: "openai/gpt-4",
- context_window_limit: 200_000,
- block_ids: ["block-1", "block-2"],
- include_base_tools: false,
- include_base_tool_rules: false,
- initial_message_sequence: [],
-});
-```
-```python Python
-# Before
-agent = client.agents.create(
- agent_type=AgentType.LETTA_V1_AGENT,
- model="openai/gpt-4",
- context_window_limit=200_000,
- block_ids=["block-1", "block-2"],
- include_base_tools=False,
- include_base_tool_rules=False,
- initial_message_sequence=[],
-)
-
-# After
-agent = client.agents.create(
- agent_type="letta_v1_agent",
- model="openai/gpt-4",
- context_window_limit=200_000,
- block_ids=["block-1", "block-2"],
- include_base_tools=False,
- include_base_tool_rules=False,
- initial_message_sequence=[],
-)
-```
-
-
-### Streaming Messages
-
-
-```typescript TypeScript
-// Before
-const stream = await client.agents.messages.createStream(agentId, {
- messages: [{
- role: Letta.MessageCreateRole.User,
- content: "Hello"
- }],
- streamTokens: true,
-});
-
-// After
-const stream = await client.agents.messages.stream(agentId, {
- messages: [{
- role: "user",
- content: "Hello"
- }],
- stream_tokens: true,
-});
-```
-```python Python
-# Before
-stream = client.agents.messages.create_stream(
- agent_id=agent_id,
- messages=[{"role": "user", "content": "Hello"}],
- stream_tokens=True,
-)
-
-# After
-stream = client.agents.messages.stream(
- agent_id=agent_id,
- messages=[{"role": "user", "content": "Hello"}],
- stream_tokens=True,
-)
-```
-
-
-### Handling Approvals
-
-
-```typescript TypeScript
-// Before
-if (message.messageType === "approval_request_message") {
- const toolCall = message.toolCall;
- await client.agents.messages.create(agentId, {
- messages: [{
- type: "approval",
- approvalRequestId: toolCall.toolCallId,
- approve: true,
- }],
- });
-}
-
-// After
-if (message.message_type === "approval_request_message") {
- const toolCalls = message.tool_calls || [];
- if (toolCalls.length > 0) {
- const toolCall = toolCalls[0];
- await client.agents.messages.create(agentId, {
- messages: [{
- type: "approval",
- approval_request_id: toolCall.tool_call_id,
- approve: true,
- }],
- });
- }
-}
-```
-```python Python
-# Before
-if message.message_type == "approval_request_message":
- tool_call = message.tool_call
- client.agents.messages.create(
- agent_id=agent_id,
- messages=[{
- "type": "approval",
- "approval_request_id": tool_call.tool_call_id,
- "approve": True,
- }],
- )
-
-# After
-if message.message_type == "approval_request_message":
- tool_calls = message.tool_calls or []
- if len(tool_calls) > 0:
- tool_call = tool_calls[0]
- client.agents.messages.create(
- agent_id=agent_id,
- messages=[{
- "type": "approval",
- "approval_request_id": tool_call.tool_call_id,
- "approve": True,
- }],
- )
-```
-
-
-### Updating Agent Configuration
-
-
-```typescript TypeScript
-// Before
-await client.agents.modify(agentId, {
- model: "openai/gpt-4",
- llmConfig: { temperature: 0.7 }
-});
-const agent = await client.agents.retrieve(agentId);
-const config = agent.llmConfig;
-
-// After
-await client.agents.update(agentId, {
- model: "openai/gpt-4",
- llm_config: { temperature: 0.7 }
-});
-const agent = await client.agents.retrieve(agentId);
-const config = agent.llm_config;
-```
-```python Python
-# Before
-client.agents.modify(
- agent_id=agent_id,
- model="openai/gpt-4",
- llm_config={"temperature": 0.7}
-)
-agent = client.agents.retrieve(agent_id=agent_id)
-config = agent.llm_config
-
-# After
-client.agents.update(
- agent_id=agent_id,
- model="openai/gpt-4",
- llm_config={"temperature": 0.7}
-)
-agent = client.agents.retrieve(agent_id=agent_id)
-config = agent.llm_config
-```
-
-
-## Migration Checklist
-
-Use this checklist to ensure a complete migration:
-
-**Core SDK Changes:**
-- [ ] Update package version to `1.0.0-alpha.10` or later
-- [ ] Update all imports (client and types)
-- [ ] Replace `LettaClient` with `Letta`
-- [ ] Update client constructor params: `token` → `apiKey`, `baseUrl` → `baseURL`
-- [ ] Add `projectId` to client constructor if using multi-project setup
-- [ ] Convert all property names from `camelCase` to `snake_case`
-- [ ] Replace enum references with string literals
-- [ ] Convert `Date` objects to ISO strings where required
-- [ ] Update type annotations to use new import paths
-
-**Method Renames:**
-- [ ] Update `modify()` calls to `update()`
-- [ ] Update `createStream()` calls to `stream()`
-- [ ] Rename `summarize_agent_conversation()` → `messages.compact()`
-- [ ] Rename `cancel_agent_run()` → `messages.cancel()`
-- [ ] Rename `preview_raw_payload()` → `messages.preview()`
-- [ ] Rename `list_agent_files()` → `files.list()`
-- [ ] Rename `export_agent_serialized()` → `export_file()`
-- [ ] Rename `import_agent_serialized()` → `import_file()`
-- [ ] Rename folder/provider method names (see section 2)
-- [ ] Update telemetry routes to use `steps.trace()`
-
-**Pagination:**
-- [ ] Update all list methods to access `.items` property
-- [ ] Replace `sort_by` with `order_by` in `agents.list()`
-- [ ] Replace `ascending` with `order` parameter
-- [ ] Update pagination parameters: `before`, `after`, `limit`, `order`
-- [ ] Handle cursor-based pagination for all list endpoints
-
-**Message Handling:**
-- [ ] Handle `tool_calls` as an array instead of single object
-- [ ] Update `identity_ids` references to use `identities` (full objects)
-- [ ] Replace `agent.memory` with `agent.blocks`
-- [ ] Update `step.messages` to use `steps.messages.list()`
-- [ ] Consider using new `input` shorthand for simple messages
-
-**Deprecations:**
-- [ ] Remove usage of deprecated search endpoints
-- [ ] Replace `list_active()` with `list(active=True)`
-- [ ] Remove `use_assistant_message` parameter
-- [ ] Replace `tool_exec_environment_variables` with `secrets`
-- [ ] Remove template-related fields from agent/block objects
-- [ ] Replace sources endpoints with folders
-- [ ] Replace passages endpoints with archives
-
-**New Features:**
-- [ ] Update attach/detach methods (now return `None`)
-- [ ] Use new archive management APIs if needed
-- [ ] Update agent import to use `override_name` instead of `append_copy_suffix`
-- [ ] Move query parameters to request body for affected endpoints
-- [ ] Use new agent configuration parameters (`temperature`, `top_p`, etc.)
-
-**MCP (Model Context Protocol) Changes:**
-- [ ] Migrate from `client.tools.mcp.servers` to `client.mcp_servers`
-- [ ] Update MCP server references to use IDs instead of names
-- [ ] Update MCP tool references to use IDs instead of names
-- [ ] Remove manual tool "add" operations (tools auto-sync on server create)
-- [ ] Replace `resync()` calls with `refresh()`
-- [ ] Replace `execute()` calls with `run()`
-- [ ] Add server/tool ID lookup logic if using names
-- [ ] Update OAuth connection flow to use server IDs
-
-**Testing:**
-- [ ] Test all agent operations (create, update, message)
-- [ ] Test streaming and approval flows
-- [ ] Verify memory block operations still work
-- [ ] Test pagination on list endpoints
-- [ ] Test archive management if used
-- [ ] Verify identity/block attach/detach operations
-- [ ] Test agent import/export
-
-## Automated Migration Tools
-
-### Find and Replace Script
-
-Use this script to help automate common replacements:
-
-
-```bash Shell (TypeScript projects)
-# Install dependencies
-npm install -g jscodeshift
-
-# Run find-and-replace (adjust paths as needed)
-find src -name "*.ts" -o -name "*.tsx" | xargs sed -i '' \
- -e 's/LettaClient/Letta/g' \
- -e 's/\.modify(/.update(/g' \
- -e 's/\.createStream(/.stream(/g' \
- -e 's/\.messageType/.message_type/g' \
- -e 's/\.toolCall/.tool_calls/g' \
- -e 's/\.toolCallId/.tool_call_id/g' \
- -e 's/\.toolReturn/.tool_return/g' \
- -e 's/llmConfig/llm_config/g' \
- -e 's/streamTokens/stream_tokens/g' \
- -e 's/\.tools\.mcp\.servers/\.mcp_servers/g' \
- -e 's/\.resync(/\.refresh(/g'
-
-# Note: MCP server/tool name -> ID migration requires manual intervention
-# as you need to fetch IDs from the API
-```
-```python Python (migration helper)
-import re
-from pathlib import Path
-
-def migrate_file(filepath: Path):
- """Apply SDK v1.0 migration patterns to a Python file"""
- content = filepath.read_text()
-
- # Import updates
- content = re.sub(
- r'from letta_client import (\w+)',
- r'from letta import \1',
- content
- )
-
- # Method renames
- content = content.replace('.modify(', '.update(')
- content = content.replace('.create_stream(', '.stream(')
-
- # MCP namespace changes
- content = content.replace('.tools.mcp.servers', '.mcp_servers')
- content = content.replace('.resync(', '.refresh(')
-
- # Already using snake_case in Python, but fix any camelCase
- content = re.sub(r'messageType', 'message_type', content)
- content = re.sub(r'toolCall([^_])', r'tool_calls\1', content)
-
- filepath.write_text(content)
- print(f"✓ Migrated {filepath}")
-
-# Usage
-for py_file in Path('src').rglob('*.py'):
- migrate_file(py_file)
-```
-
-
-
-**Always review automated changes!** These scripts help with common patterns but cannot handle all edge cases. Test thoroughly after migration.
-
-
-## Troubleshooting
-
-### "Property 'llmConfig' does not exist" (TypeScript)
-
-**Cause:** Property renamed to `llm_config`
-
-**Fix:** Update all references to use snake_case
-
-```typescript
-- agent.llmConfig
-+ agent.llm_config
-```
-
-### "Cannot read property 'toolCallId' of undefined"
-
-**Cause:** `tool_call` changed to `tool_calls` (array)
-
-**Fix:** Access the first element of the array
-
-```typescript
-- const id = message.toolCall.toolCallId;
-+ const toolCalls = message.tool_calls || [];
-+ const id = toolCalls[0]?.tool_call_id;
-```
-
-### "items is not iterable"
-
-**Cause:** Trying to iterate over page object instead of items array
-
-**Fix:** Access the `.items` property first
-
-```typescript
-- for (const message of messages) {
-+ const messagesPage = await client.agents.messages.list(agentId);
-+ for (const message of messagesPage.items) {
-```
-
-### "Cannot find module '@letta-ai/letta-client/resources/...'"
-
-**Cause:** Types moved to subpath exports
-
-**Fix:** Update imports to use new subpaths
-
-```typescript
-- import { Letta } from "@letta-ai/letta-client";
-+ import type { Block } from "@letta-ai/letta-client/resources/agents/agents";
-```
-
-### "Method 'modify' does not exist"
-
-**Cause:** Method renamed to `update`
-
-**Fix:** Update all modify calls
-
-```typescript
-- await client.agents.modify(agentId, updates)
-+ await client.agents.update(agentId, updates)
-```
-
-### "Cannot access property 'identity_ids'"
-
-**Cause:** Field renamed to `identities` and now returns full objects
-
-**Fix:** Access the `identities` array and extract IDs if needed
-
-```typescript
-- const ids = agent.identity_ids;
-+ const identities = agent.identities;
-+ const ids = identities.map(i => i.id);
-```
-
-### "Pagination parameters 'sort_by' or 'ascending' not recognized"
-
-**Cause:** Pagination parameters standardized to `order_by` and `order`
-
-**Fix:** Update parameter names
-
-```typescript
-- await client.agents.list({ sort_by: "created_at", ascending: true })
-+ await client.agents.list({ order_by: "created_at", order: "asc" })
-```
-
-### "Attach/detach methods return undefined"
-
-**Cause:** These methods now return `None`/`void` instead of updated state
-
-**Fix:** Fetch the object separately if you need the updated state
-
-```typescript
-await client.agents.tools.attach(agentId, toolId);
-const agent = await client.agents.retrieve(agentId); // Get updated state
-```
-
-### "Cannot find method 'summarize_agent_conversation'"
-
-**Cause:** Method moved to messages subresource
-
-**Fix:** Use the new path
-
-```typescript
-- await client.agents.summarize_agent_conversation(agentId)
-+ await client.agents.messages.compact(agentId)
-```
-
-### "Query parameter 'add_default_initial_messages' not working"
-
-**Cause:** Parameter moved from query to request body
-
-**Fix:** Pass as request body parameter
-
-```typescript
-- await client.agents.messages.reset(agentId, { params: { add_default_initial_messages: false } })
-+ await client.agents.messages.reset(agentId, { add_default_initial_messages: false })
-```
-
-### "Cannot find 'client.tools.mcp.servers'"
-
-**Cause:** MCP routes moved to new namespace
-
-**Fix:** Use new MCP server methods
-
-```typescript
-- await client.tools.mcp.servers.list()
-+ await client.mcp_servers.list()
-```
-
-### "MCP server not found by name"
-
-**Cause:** MCP methods now use server IDs instead of names
-
-**Fix:** Lookup server ID from name first
-
-```typescript
-// Get server ID from name
-const servers = await client.mcp_servers.list();
-const myServer = servers.find(s => s.name === "my-server");
-const serverId = myServer.id;
-
-// Use ID for subsequent operations
-await client.mcp_servers.tools.list(serverId);
-```
-
-### "MCP tool 'toolName' not found"
-
-**Cause:** MCP tool execution now uses tool IDs instead of names
-
-**Fix:** Lookup tool ID from name first
-
-```typescript
-const tools = await client.mcp_servers.tools.list(serverId);
-const myTool = tools.find(t => t.name === "my-tool");
-const toolId = myTool.id;
-
-await client.mcp_servers.tools.run(serverId, toolId, { args });
-```
-
-### "Method 'execute' not found on mcp_servers.tools"
-
-**Cause:** Method renamed from `execute()` to `run()`
-
-**Fix:** Use the new method name
-
-```typescript
-- await client.mcp_servers.tools.execute(serverId, toolId, { args })
-+ await client.mcp_servers.tools.run(serverId, toolId, { args })
-```
-
-## Additional Resources
-
-- [Architecture Migration Guide](/guides/legacy/migration_guide) - For migrating agent architectures
-- [API Reference](/api-reference) - Complete SDK documentation
-- [Changelog](/api-reference/changelog) - All SDK changes
-- [GitHub](https://github.com/letta-ai/letta) - Source code and issues
-- [Discord](https://discord.gg/letta) - Get help from the community
-
-## Getting Help
-
-If you encounter issues during migration:
-
-1. **Check the [Changelog](/api-reference/changelog)** for detailed release notes
-2. **Search [GitHub Issues](https://github.com/letta-ai/letta/issues)** for known problems
-3. **Ask in [Discord #dev-help](https://discord.gg/letta)** for community support
-4. **Contact support@letta.com** for enterprise support
diff --git a/letta/__init__.py b/letta/__init__.py
index a8230d79..b091ed4b 100644
--- a/letta/__init__.py
+++ b/letta/__init__.py
@@ -5,7 +5,7 @@ try:
__version__ = version("letta")
except PackageNotFoundError:
# Fallback for development installations
- __version__ = "0.15.0"
+ __version__ = "0.15.1"
if os.environ.get("LETTA_VERSION"):
__version__ = os.environ["LETTA_VERSION"]
diff --git a/pyproject.toml b/pyproject.toml
index 367dfaf4..192a8bc5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "letta"
-version = "0.15.0"
+version = "0.15.1"
description = "Create LLM agents with long-term memory and custom tools"
authors = [
{name = "Letta Team", email = "contact@letta.com"},
diff --git a/uv.lock b/uv.lock
index 584b034b..dc7c51db 100644
--- a/uv.lock
+++ b/uv.lock
@@ -2335,7 +2335,7 @@ wheels = [
[[package]]
name = "letta"
-version = "0.15.0"
+version = "0.15.1"
source = { editable = "." }
dependencies = [
{ name = "aiomultiprocess" },