chore: bump version 0.11.8

Caren Thomas
2025-10-07 18:31:26 -07:00
307 changed files with 45291 additions and 14900 deletions

View File

@@ -62,13 +62,6 @@ def default_user(default_organization):
yield user
@pytest.fixture
def check_composio_key_set():
original_api_key = tool_settings.composio_api_key
assert original_api_key is not None, "Missing composio key! Cannot execute this test."
yield
# --- Tool Fixtures ---
@pytest.fixture
def weather_tool_func():

View File

@@ -34,15 +34,11 @@ jobs:
"fail-fast": false,
"matrix": {
"test_suite": [
"integration_test_summarizer.py",
"integration_test_async_tool_sandbox.py",
"integration_test_sleeptime_agent.py",
"integration_test_agent_tool_graph.py",
"integration_test_composio.py",
"integration_test_chat_completions.py",
"integration_test_multi_agent.py",
"integration_test_batch_api_cron_jobs.py",
"integration_test_batch_sdk.py",
"integration_test_builtin_tools.py",
"integration_test_turbopuffer.py",
"integration_test_human_in_the_loop.py"

View File

@@ -25,7 +25,7 @@ jobs:
apps/core/**
.github/workflows/reusable-test-workflow.yml
.github/workflows/core-unit-sqlite-test.yml
install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google --extra sqlite'
install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra sqlite'
timeout-minutes: 15
ref: ${{ github.event.pull_request.head.sha || github.sha }}
@@ -36,21 +36,19 @@ jobs:
"include": [
{"test_suite": "test_client.py"},
{"test_suite": "test_sdk_client.py"},
{"test_suite": "test_server.py"},
{"test_suite": "test_tool_schema_parsing.py"},
{"test_suite": "test_tool_rule_solver.py"},
{"test_suite": "test_memory.py"},
{"test_suite": "test_utils.py"},
{"test_suite": "test_stream_buffer_readers.py"},
{"test_suite": "test_agent_serialization.py"},
{"test_suite": "test_optimistic_json_parser.py"},
{"test_suite": "test_llm_clients.py"},
{"test_suite": "test_letta_agent_batch.py"},
{"test_suite": "test_providers.py"},
{"test_suite": "test_sources.py"},
{"test_suite": "test_managers.py"},
{"test_suite": "managers/"},
{"test_suite": "sdk/"},
{"test_suite": "mcp_tests/", "use_experimental": true},
{"test_suite": "mcp_tests/"},
{"test_suite": "test_timezone_formatting.py"},
{"test_suite": "test_plugins.py"},
{"test_suite": "test_embeddings.py"},

View File

@@ -26,7 +26,7 @@ jobs:
**
.github/workflows/reusable-test-workflow.yml
.github/workflows/core-unit-test.yml
install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google'
install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox'
timeout-minutes: 15
ref: ${{ github.event.pull_request.head.sha || github.sha }}
matrix-strategy: |
@@ -36,14 +36,12 @@ jobs:
"include": [
{"test_suite": "test_client.py"},
{"test_suite": "test_sdk_client.py"},
{"test_suite": "test_server.py"},
{"test_suite": "test_managers.py"},
{"test_suite": "managers/"},
{"test_suite": "test_tool_schema_parsing.py"},
{"test_suite": "test_tool_rule_solver.py"},
{"test_suite": "test_memory.py"},
{"test_suite": "test_utils.py"},
{"test_suite": "test_stream_buffer_readers.py"},
{"test_suite": "test_agent_serialization.py"},
{"test_suite": "test_agent_serialization_v2.py"},
{"test_suite": "test_optimistic_json_parser.py"},
{"test_suite": "test_llm_clients.py"},
@@ -51,7 +49,7 @@ jobs:
{"test_suite": "test_providers.py"},
{"test_suite": "test_sources.py"},
{"test_suite": "sdk/"},
{"test_suite": "mcp_tests/", "use_experimental": true},
{"test_suite": "mcp_tests/"},
{"test_suite": "test_timezone_formatting.py"},
{"test_suite": "test_plugins.py"},
{"test_suite": "test_embeddings.py"},

View File

@@ -21,9 +21,9 @@ jobs:
python-version: 3.11
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Set permissions for log directory
run: |

View File

@@ -89,10 +89,9 @@ jobs:
python-version: 3.12
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: false
activate-environment: true
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Install dependencies
run: uv sync --extra dev --extra postgres --extra external-tools

View File

@@ -34,9 +34,9 @@ jobs:
python-version: 3.11
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Install Dependencies
run: |

View File

@@ -61,7 +61,7 @@ jobs:
- name: Install dependencies
shell: bash
run: uv sync --extra dev --extra postgres --extra external-tools --extra cloud-tool-sandbox --extra google
run: uv sync --extra dev --extra postgres --extra external-tools --extra cloud-tool-sandbox
- name: Migrate database
env:
LETTA_PG_PORT: 5432
@@ -88,11 +88,9 @@ jobs:
AZURE_API_KEY: ${{ env.AZURE_API_KEY }}
AZURE_BASE_URL: ${{ secrets.AZURE_BASE_URL }}
GEMINI_API_KEY: ${{ env.GEMINI_API_KEY }}
COMPOSIO_API_KEY: ${{ env.COMPOSIO_API_KEY }}
GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT}}
GOOGLE_CLOUD_LOCATION: ${{ secrets.GOOGLE_CLOUD_LOCATION}}
DEEPSEEK_API_KEY: ${{ env.DEEPSEEK_API_KEY}}
LETTA_USE_EXPERIMENTAL: 1
run: |
uv run pytest \
-s -vv \

View File

@@ -38,10 +38,9 @@ jobs:
python-version: 3.12
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
activate-environment: true
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Set release version
run: |

View File

@@ -20,11 +20,9 @@ jobs:
python-version: 3.12
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
activate-environment: true
cache-dependency-glob: "uv.lock"
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Build the Python package
run: uv build

View File

@@ -233,9 +233,9 @@ jobs:
ref: ${{ github.event.pull_request.head.ref }}
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Set core directory
id: detect-core-dir
@@ -367,7 +367,6 @@ jobs:
LETTA_MISTRAL_API_KEY: ${{ secrets.LETTA_MISTRAL_API_KEY }}
# External service API Keys (shared across all test types)
COMPOSIO_API_KEY: ${{ env.COMPOSIO_API_KEY }}
E2B_API_KEY: ${{ env.E2B_API_KEY }}
E2B_SANDBOX_TEMPLATE_ID: ${{ env.E2B_SANDBOX_TEMPLATE_ID }}

View File

@@ -25,7 +25,7 @@ jobs:
**
.github/workflows/reusable-test-workflow.yml
.github/workflows/send-message-integration-tests.yml
install-args: '--extra dev --extra postgres --extra external-tools --extra cloud-tool-sandbox --extra google --extra redis'
install-args: '--extra dev --extra postgres --extra external-tools --extra cloud-tool-sandbox --extra redis'
timeout-minutes: 15
runner: '["self-hosted", "medium"]'
ref: ${{ github.event.pull_request.head.sha || github.sha }}

View File

@@ -32,7 +32,7 @@ jobs:
with:
test-type: "integration"
is-external-pr: ${{ github.event_name == 'pull_request_target' && !contains(github.event.pull_request.labels.*.name, 'safe to test') }}
install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google"
install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox"
test-command: "uv run pytest -svv tests/"
timeout-minutes: 60
runner: '["self-hosted", "gpu", "lmstudio"]'

View File

@@ -32,7 +32,7 @@ jobs:
with:
test-type: "integration"
is-external-pr: ${{ github.event_name == 'pull_request_target' && !contains(github.event.pull_request.labels.*.name, 'safe to test') }}
install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google"
install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox"
test-command: "uv run --frozen pytest -svv tests/"
timeout-minutes: 60
runner: '["self-hosted", "gpu", "ollama"]'

View File

@@ -28,7 +28,7 @@ jobs:
with:
test-type: "integration"
is-external-pr: ${{ github.event_name == 'pull_request_target' && !contains(github.event.pull_request.labels.*.name, 'safe to test') }}
install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google"
install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox"
test-command: "uv run --frozen pytest -svv tests/"
timeout-minutes: 60
runner: '["self-hosted", "gpu", "vllm"]'

View File

@@ -1,6 +1,6 @@
# Start with pgvector base for builder
FROM ankane/pgvector:v0.5.1 AS builder
# comment to trigger ci
# Install Python and required packages
RUN apt-get update && apt-get install -y \
python3 \
@@ -69,8 +69,7 @@ ENV LETTA_ENVIRONMENT=${LETTA_ENVIRONMENT} \
PATH="/app/.venv/bin:$PATH" \
POSTGRES_USER=letta \
POSTGRES_PASSWORD=letta \
POSTGRES_DB=letta \
COMPOSIO_DISABLE_VERSION_CHECK=true
POSTGRES_DB=letta
ARG LETTA_VERSION
ENV LETTA_VERSION=${LETTA_VERSION}

View File

@@ -0,0 +1,55 @@
"""add cascades to blocks_agents FKs; set initially immediate
Revision ID: 038e68cdf0df
Revises: b6061da886ee
Create Date: 2025-10-07 13:01:17.872405
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "038e68cdf0df"
down_revision: Union[str, None] = "b6061da886ee"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(op.f("blocks_agents_agent_id_fkey"), "blocks_agents", type_="foreignkey")
op.drop_constraint(op.f("fk_block_id_label"), "blocks_agents", type_="foreignkey")
op.create_foreign_key(
"fk_block_id_label",
"blocks_agents",
"block",
["block_id", "block_label"],
["id", "label"],
onupdate="CASCADE",
ondelete="CASCADE",
initially="IMMEDIATE",
deferrable=True,
)
# name the FK explicitly so downgrade() can drop it by name
op.create_foreign_key("blocks_agents_agent_id_fkey", "blocks_agents", "agents", ["agent_id"], ["id"], ondelete="CASCADE")
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint("blocks_agents_agent_id_fkey", "blocks_agents", type_="foreignkey")
op.drop_constraint("fk_block_id_label", "blocks_agents", type_="foreignkey")
op.create_foreign_key(
op.f("fk_block_id_label"),
"blocks_agents",
"block",
["block_id", "block_label"],
["id", "label"],
initially="DEFERRED",
deferrable=True,
)
op.create_foreign_key(op.f("blocks_agents_agent_id_fkey"), "blocks_agents", "agents", ["agent_id"], ["id"])
# ### end Alembic commands ###

View File

@@ -0,0 +1,33 @@
"""create new runs table and remove legacy tables
Revision ID: 3bc3c031fbe4
Revises: 567e9fe06270
Create Date: 2025-10-03 12:10:51.065067
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "3bc3c031fbe4"
down_revision: Union[str, None] = "567e9fe06270"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_index("ix_agents_project_id", "agents", ["project_id"], unique=False)
op.create_index("ix_messages_run_id", "messages", ["run_id"], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_messages_run_id", table_name="messages")
op.drop_index("ix_agents_project_id", table_name="agents")
# ### end Alembic commands ###

View File

@@ -0,0 +1,68 @@
"""Add additional indexes
Revision ID: 3d2e9fb40a3c
Revises: 57bcea83af3f
Create Date: 2025-09-20 00:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "3d2e9fb40a3c"
down_revision: Union[str, None] = "57bcea83af3f"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def _create_index_if_missing(index_name: str, table_name: str, columns: list[str], unique: bool = False) -> None:
"""Create an index if it does not already exist.
Uses SQLAlchemy inspector to avoid duplicate index errors across environments.
"""
bind = op.get_bind()
inspector = sa.inspect(bind)
existing = {ix["name"] for ix in inspector.get_indexes(table_name)}
if index_name not in existing:
op.create_index(index_name, table_name, columns, unique=unique)
def upgrade() -> None:
# files_agents: speed up WHERE agent_id IN (...)
_create_index_if_missing("ix_files_agents_agent_id", "files_agents", ["agent_id"])
# block: speed up common org+deployment filters
_create_index_if_missing(
"ix_block_organization_id_deployment_id",
"block",
["organization_id", "deployment_id"],
)
# agents: speed up common org+deployment filters
_create_index_if_missing(
"ix_agents_organization_id_deployment_id",
"agents",
["organization_id", "deployment_id"],
)
# Note: The index on block.current_history_entry_id (ix_block_current_history_entry_id)
# already exists from prior migrations. If drift is suspected, consider verifying
# and recreating it manually to avoid duplicate indexes under different names.
def downgrade() -> None:
# Drop indexes added in this migration (ignore if missing for portability)
for name, table in [
("ix_agents_organization_id_deployment_id", "agents"),
("ix_block_organization_id_deployment_id", "block"),
("ix_files_agents_agent_id", "files_agents"),
]:
try:
op.drop_index(name, table_name=table)
except Exception:
# Be permissive in environments where indexes may have different names
pass

View File

@@ -0,0 +1,128 @@
"""create new runs table and remove legacy tables
Revision ID: 567e9fe06270
Revises: 3d2e9fb40a3c
Create Date: 2025-09-22 15:22:28.651178
"""
from typing import Sequence, Union
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "567e9fe06270"
down_revision: Union[str, None] = "3d2e9fb40a3c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"runs",
sa.Column("id", sa.String(), nullable=False),
sa.Column("status", sa.String(), nullable=False),
sa.Column("completed_at", sa.DateTime(), nullable=True),
sa.Column("stop_reason", sa.String(), nullable=True),
sa.Column("background", sa.Boolean(), nullable=True),
sa.Column("metadata_", sa.JSON(), nullable=True),
sa.Column("request_config", sa.JSON(), nullable=True),
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("callback_url", sa.String(), nullable=True),
sa.Column("callback_sent_at", sa.DateTime(), nullable=True),
sa.Column("callback_status_code", sa.Integer(), nullable=True),
sa.Column("callback_error", sa.String(), nullable=True),
sa.Column("ttft_ns", sa.BigInteger(), nullable=True),
sa.Column("total_duration_ns", sa.BigInteger(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("project_id", sa.String(), nullable=True),
sa.Column("base_template_id", sa.String(), nullable=True),
sa.Column("template_id", sa.String(), nullable=True),
sa.Column("deployment_id", sa.String(), nullable=True),
sa.ForeignKeyConstraint(
["agent_id"],
["agents.id"],
),
sa.ForeignKeyConstraint(
["organization_id"],
["organizations.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("ix_runs_agent_id", "runs", ["agent_id"], unique=False)
op.create_index("ix_runs_created_at", "runs", ["created_at", "id"], unique=False)
op.create_index("ix_runs_organization_id", "runs", ["organization_id"], unique=False)
op.drop_index(op.f("ix_agents_runs_agent_id_run_id"), table_name="agents_runs")
op.drop_index(op.f("ix_agents_runs_run_id_agent_id"), table_name="agents_runs")
op.drop_table("agents_runs")
op.drop_table("job_messages")
op.add_column("messages", sa.Column("run_id", sa.String(), nullable=True))
op.create_foreign_key("fk_messages_run_id", "messages", "runs", ["run_id"], ["id"], ondelete="SET NULL")
op.add_column("step_metrics", sa.Column("run_id", sa.String(), nullable=True))
op.drop_constraint(op.f("step_metrics_job_id_fkey"), "step_metrics", type_="foreignkey")
op.create_foreign_key("fk_step_metrics_run_id", "step_metrics", "runs", ["run_id"], ["id"], ondelete="SET NULL")
op.drop_column("step_metrics", "job_id")
op.add_column("steps", sa.Column("run_id", sa.String(), nullable=True))
op.drop_index(op.f("ix_steps_job_id"), table_name="steps")
op.create_index("ix_steps_run_id", "steps", ["run_id"], unique=False)
op.drop_constraint(op.f("fk_steps_job_id"), "steps", type_="foreignkey")
op.create_foreign_key("fk_steps_run_id", "steps", "runs", ["run_id"], ["id"], ondelete="SET NULL")
op.drop_column("steps", "job_id")
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("steps", sa.Column("job_id", sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_constraint("fk_steps_run_id", "steps", type_="foreignkey")
op.create_foreign_key(op.f("fk_steps_job_id"), "steps", "jobs", ["job_id"], ["id"], ondelete="SET NULL")
op.drop_index("ix_steps_run_id", table_name="steps")
op.create_index(op.f("ix_steps_job_id"), "steps", ["job_id"], unique=False)
op.drop_column("steps", "run_id")
op.add_column("step_metrics", sa.Column("job_id", sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_constraint("fk_step_metrics_run_id", "step_metrics", type_="foreignkey")
op.create_foreign_key(op.f("step_metrics_job_id_fkey"), "step_metrics", "jobs", ["job_id"], ["id"], ondelete="SET NULL")
op.drop_column("step_metrics", "run_id")
op.drop_constraint("fk_messages_run_id", "messages", type_="foreignkey")
op.drop_column("messages", "run_id")
op.create_table(
"job_messages",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("job_id", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("message_id", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(timezone=True), server_default=sa.text("now()"), autoincrement=False, nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(timezone=True), server_default=sa.text("now()"), autoincrement=False, nullable=True),
sa.Column("is_deleted", sa.BOOLEAN(), server_default=sa.text("false"), autoincrement=False, nullable=False),
sa.Column("_created_by_id", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("_last_updated_by_id", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(["job_id"], ["jobs.id"], name=op.f("fk_job_messages_job_id"), ondelete="CASCADE"),
sa.ForeignKeyConstraint(["message_id"], ["messages.id"], name=op.f("fk_job_messages_message_id"), ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id", name=op.f("pk_job_messages")),
sa.UniqueConstraint(
"job_id", "message_id", name=op.f("unique_job_message"), postgresql_include=[], postgresql_nulls_not_distinct=False
),
)
op.create_table(
"agents_runs",
sa.Column("agent_id", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("run_id", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], name=op.f("agents_runs_agent_id_fkey")),
sa.ForeignKeyConstraint(["run_id"], ["jobs.id"], name=op.f("agents_runs_run_id_fkey")),
sa.PrimaryKeyConstraint("agent_id", "run_id", name=op.f("unique_agent_run")),
)
op.create_index(op.f("ix_agents_runs_run_id_agent_id"), "agents_runs", ["run_id", "agent_id"], unique=False)
op.create_index(op.f("ix_agents_runs_agent_id_run_id"), "agents_runs", ["agent_id", "run_id"], unique=False)
op.drop_index("ix_runs_organization_id", table_name="runs")
op.drop_index("ix_runs_created_at", table_name="runs")
op.drop_index("ix_runs_agent_id", table_name="runs")
op.drop_table("runs")
# ### end Alembic commands ###

View File

@@ -0,0 +1,43 @@
"""add various indexes
Revision ID: 57bcea83af3f
Revises: 5973fd8b8c60
Create Date: 2025-09-19 10:58:19.658106
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "57bcea83af3f"
down_revision: Union[str, None] = "5973fd8b8c60"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_index("ix_block_hidden", "block", ["hidden"], unique=False)
op.create_index("ix_block_is_template", "block", ["is_template"], unique=False)
op.create_index("ix_block_org_project_template", "block", ["organization_id", "project_id", "is_template"], unique=False)
op.create_index("ix_block_organization_id", "block", ["organization_id"], unique=False)
op.create_index("ix_block_project_id", "block", ["project_id"], unique=False)
op.create_index("ix_jobs_user_id", "jobs", ["user_id"], unique=False)
op.create_index("ix_steps_job_id", "steps", ["job_id"], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_steps_job_id", table_name="steps")
op.drop_index("ix_jobs_user_id", table_name="jobs")
op.drop_index("ix_block_project_id", table_name="block")
op.drop_index("ix_block_organization_id", table_name="block")
op.drop_index("ix_block_org_project_template", table_name="block")
op.drop_index("ix_block_is_template", table_name="block")
op.drop_index("ix_block_hidden", table_name="block")
# ### end Alembic commands ###

View File

@@ -0,0 +1,51 @@
"""add agents_runs table
Revision ID: 5973fd8b8c60
Revises: eff256d296cb
Create Date: 2025-09-18 10:52:46.270241
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "5973fd8b8c60"
down_revision: Union[str, None] = "eff256d296cb"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"agents_runs",
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("run_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(
["agent_id"],
["agents.id"],
),
sa.ForeignKeyConstraint(
["run_id"],
["jobs.id"],
),
sa.PrimaryKeyConstraint("agent_id", "run_id"),
sa.UniqueConstraint("agent_id", "run_id", name="unique_agent_run"),
)
op.create_index("ix_agents_runs_agent_id_run_id", "agents_runs", ["agent_id", "run_id"], unique=False)
op.create_index("ix_agents_runs_run_id_agent_id", "agents_runs", ["run_id", "agent_id"], unique=False)
op.add_column("jobs", sa.Column("background", sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("jobs", "background")
op.drop_index("ix_agents_runs_run_id_agent_id", table_name="agents_runs")
op.drop_index("ix_agents_runs_agent_id_run_id", table_name="agents_runs")
op.drop_table("agents_runs")
# ### end Alembic commands ###

View File

@@ -0,0 +1,33 @@
"""replace composite runs index
Revision ID: 89b595051e48
Revises: f9ad1c25fd2b
Create Date: 2025-10-06 13:17:09.918439
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "89b595051e48"
down_revision: Union[str, None] = "f9ad1c25fd2b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_messages_run_err_sequence"), table_name="messages")
op.create_index("ix_messages_run_sequence", "messages", ["run_id", "sequence_id"], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_messages_run_sequence", table_name="messages")
op.create_index(op.f("ix_messages_run_err_sequence"), "messages", ["run_id", "is_err", "sequence_id"], unique=False)
# ### end Alembic commands ###

View File

@@ -0,0 +1,39 @@
"""add encrypted columns
Revision ID: b6061da886ee
Revises: 89b595051e48
Create Date: 2025-10-06 14:55:32.554544
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "b6061da886ee"
down_revision: Union[str, None] = "89b595051e48"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("agent_environment_variables", sa.Column("value_enc", sa.Text(), nullable=True))
op.add_column("mcp_oauth", sa.Column("authorization_code_enc", sa.Text(), nullable=True))
op.add_column("providers", sa.Column("api_key_enc", sa.Text(), nullable=True))
op.add_column("providers", sa.Column("access_key_enc", sa.Text(), nullable=True))
op.add_column("sandbox_environment_variables", sa.Column("value_enc", sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("sandbox_environment_variables", "value_enc")
op.drop_column("providers", "access_key_enc")
op.drop_column("providers", "api_key_enc")
op.drop_column("mcp_oauth", "authorization_code_enc")
op.drop_column("agent_environment_variables", "value_enc")
# ### end Alembic commands ###

View File

@@ -9,7 +9,6 @@ Create Date: 2025-01-16 14:21:33.764332
from typing import Sequence, Union
from alembic import op
from letta.schemas.enums import ToolType
from letta.settings import settings
# revision identifiers, used by Alembic.
@@ -25,8 +24,8 @@ def upgrade() -> None:
return
# ### commands auto generated by Alembic - please adjust! ###
# Define the value for EXTERNAL_COMPOSIO
external_composio_value = ToolType.EXTERNAL_COMPOSIO.value
# Define the value for EXTERNAL_COMPOSIO (using string literal since enum was removed)
external_composio_value = "external_composio"
# Update tool_type to EXTERNAL_COMPOSIO if the tags field includes "composio"
# This is super brittle and awful but no other way to do this
@@ -46,7 +45,8 @@ def downgrade() -> None:
return
# ### commands auto generated by Alembic - please adjust! ###
custom_value = ToolType.CUSTOM.value
# Use string literal for CUSTOM value
custom_value = "custom"
# Update tool_type to CUSTOM if the tags field includes "composio"
# This is super brittle and awful but no other way to do this

View File

@@ -0,0 +1,31 @@
"""add query optimizing runs listing
Revision ID: f9ad1c25fd2b
Revises: 3bc3c031fbe4
Create Date: 2025-10-04 00:44:06.663817
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "f9ad1c25fd2b"
down_revision: Union[str, None] = "3bc3c031fbe4"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_index("ix_messages_run_err_sequence", "messages", ["run_id", "is_err", "sequence_id"], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_messages_run_err_sequence", table_name="messages")
# ### end Alembic commands ###

View File

@@ -1,4 +1,6 @@
#!/usr/bin/env tsx
/* eslint-disable @typescript-eslint/no-non-null-assertion */
/**
* Minimal TypeScript examples showing Letta's streaming API.
* Demonstrates both step streaming (default) and token streaming modes.
@@ -20,6 +22,7 @@ async function stepStreamingExample(client: LettaClient, agentId: string): Promise<void> {
for await (const chunk of stream as AsyncIterable<LettaMessage>) {
// Each chunk is a complete message
if (chunk.messageType === 'assistant_message') {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
console.log((chunk as any).content);
}
}
@@ -49,6 +52,7 @@ async function tokenStreamingExample(client: LettaClient, agentId: string): Promise<void> {
}
// Accumulate and print content
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const contentChunk = (chunk as any).content || '';
messageAccumulators.set(msgId, messageAccumulators.get(msgId)! + contentChunk);
process.stdout.write(contentChunk);
@@ -99,4 +103,4 @@ async function main(): Promise<void> {
}
// Run the example
main().catch(console.error);
main().catch(console.error);

fern/diagrams/README.md (new file, 140 lines)
View File

@@ -0,0 +1,140 @@
# Letta Documentation Diagrams
This directory contains mermaid diagram code for the Letta documentation.
## Diagrams Included
### 1. Agent Reasoning Loop (`agent-reasoning-loop.md`)
**Purpose:** Shows how an agent processes a user message step-by-step
**Location:** `fern/pages/agents/overview.mdx`
**Key insight:** Illustrates the complete lifecycle from request to response, including tool calls
### 2. Memory Hierarchy (`memory-hierarchy.md`)
**Purpose:** Explains the difference between in-context and out-of-context memory
**Location:** `fern/pages/agents/memory.mdx`
**Key insight:** Clarifies why memory blocks are different from RAG/vector search
### 3. Stateful vs Stateless (`stateful-vs-stateless.md`)
**Purpose:** Shows why Letta's stateful design is fundamentally different
**Location:** `fern/pages/concepts/letta.mdx` or homepage
**Key insight:** The "aha moment" - explains why you only send new messages
### 4. Tool Execution Lifecycle (`tool-execution-lifecycle.md`)
**Purpose:** Demystifies how tools are registered, called, and executed
**Location:** `fern/pages/agents/tools.mdx`
**Key insight:** Shows the sandbox execution and tool schema generation
### 5. System Architecture (`system-architecture.md`)
**Purpose:** Complete picture of all Letta components
**Location:** `fern/pages/getting-started/letta_platform.mdx`
**Key insight:** Shows how everything fits together
## How to Use These Diagrams
### 1. Copy the mermaid code blocks into your .mdx files
````markdown
---
title: Your Page Title
---
Your intro text...
```mermaid
[paste diagram code here]
```
Your explanation text...
````
### 2. Customize as needed
Each diagram includes:
- Main version (detailed)
- Alternative version (simplified)
- Explanation text
- Usage notes
Use whichever fits your page best.
### 3. Styling
Mermaid supports both light and dark themes automatically. The diagrams use colors that work in both modes.
To customize colors:
```mermaid
graph TB
A[Node]
style A fill:#e3f2fd
```
## Recommended Diagram Placements
### Critical (Add immediately)
1. **Stateful vs Stateless** → Homepage or concepts page (highest impact)
2. **Agent Reasoning Loop** → Agents overview page
3. **Memory Hierarchy** → Memory guide page
### High Priority
4. **Tool Execution** → Tools guide page
5. **System Architecture** → Platform overview page
### Future Additions
6. Multi-agent communication diagram
7. Sleep-time agent architecture
8. Context window management
9. Streaming architecture
10. Authentication flow
## Creating New Diagrams
When creating new diagrams for Letta docs:
### Use consistent colors:
- Blue (`#e3f2fd`) - Client/API layer
- Purple (`#f3e5f5`) - Server/runtime
- Yellow (`#fff9c4`) - Storage/memory
- Green (`#e8f5e9`) - External services
### Keep them simple:
- One concept per diagram
- 5-10 nodes maximum
- Clear labels and annotations
### Provide alternatives:
- Detailed version for in-depth pages
- Simplified version for quickstarts
- Code comparison when relevant
### Include explanations:
- What the diagram shows
- Why it matters
- How it relates to code
## Mermaid Resources
- [Mermaid Live Editor](https://mermaid.live/) - Test your diagrams
- [Mermaid Documentation](https://mermaid.js.org/) - Syntax reference
- [Fern Mermaid Support](https://buildwithfern.com/learn/docs/content/diagrams) - How Fern renders mermaid
## Testing
Before committing diagrams:
1. Test in [Mermaid Live Editor](https://mermaid.live/)
2. Check both light and dark themes
3. Verify on mobile (diagrams should be responsive)
4. Ensure text is readable at all sizes
## Contributing
To add a new diagram:
1. Create a new `.md` file in this directory
2. Include mermaid code, alternatives, and explanation
3. Add entry to this README
4. Open PR with screenshot of rendered diagram
## Questions?
Slack: #docs
Owner: Documentation Team

View File

@@ -0,0 +1,104 @@
# Agent Reasoning Loop
**Location:** Add to `fern/pages/agents/overview.mdx` after the "Building Stateful Agents" introduction
**What it shows:** The complete lifecycle of an agent processing a user message, including internal reasoning, tool calls, and responses.
## Diagram Code
```mermaid
sequenceDiagram
participant User
participant API as Letta API
participant Agent as Agent Runtime
participant LLM
participant Tools
participant DB as Database
User->>API: POST /agents/{id}/messages
Note over User,API: {"role": "user", "content": "..."}
API->>DB: Load agent state
DB-->>API: AgentState + Memory
API->>Agent: Process message
rect rgb(240, 248, 255)
Note over Agent,LLM: Agent Step 1
Agent->>LLM: Context + User message
Note over Agent,LLM: Context includes:<br/>- System prompt<br/>- Memory blocks<br/>- Available tools<br/>- Recent messages
LLM-->>Agent: Reasoning + Tool call
Note over Agent: reasoning_message:<br/>"User asked about...<br/>I should check..."
Agent->>DB: Save reasoning message
Agent->>Tools: Execute tool
Tools-->>Agent: Tool result
Note over Agent: tool_return_message
Agent->>DB: Save tool call + result
end
rect rgb(255, 250, 240)
Note over Agent,LLM: Agent Step 2
Agent->>LLM: Context + Tool result
LLM-->>Agent: Response to user
Note over Agent: assistant_message:<br/>"Based on the data..."
Agent->>DB: Save response
end
Agent->>DB: Update agent state
Note over DB: State persisted:<br/>- New messages<br/>- Updated memory<br/>- Usage stats
Agent-->>API: Response object
API-->>User: HTTP 200 + messages
Note over User,API: {messages: [reasoning, tool_call,<br/>tool_return, assistant]}
```
## Alternative: Simplified Version
If the above is too detailed, use this simpler version:
```mermaid
sequenceDiagram
participant User
participant Agent
participant LLM
participant Tools
User->>Agent: "What's the weather?"
loop Agent Reasoning Loop
Agent->>LLM: Send context + message
LLM-->>Agent: Think + decide action
alt Agent calls tool
Agent->>Tools: Execute tool
Tools-->>Agent: Return result
Note over Agent: Continue loop with result
else Agent responds to user
Agent-->>User: "It's sunny, 72°F"
Note over Agent: Loop ends
end
end
```
## Explanation to Add
After the diagram, add this text:
> **How it works:**
>
> 1. **User sends message** - A single new message arrives via the API
> 2. **Agent loads context** - System retrieves agent state, memory blocks, and conversation history from the database
> 3. **LLM reasoning** - The agent thinks through the problem (chain-of-thought)
> 4. **Tool execution** - If needed, the agent calls tools to gather information or take actions
> 5. **Response generation** - The agent formulates its final response to the user
> 6. **State persistence** - All steps are saved to the database for future context
>
> Unlike stateless APIs, this entire loop happens **server-side**, and the agent's state persists between messages.
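## Optional Code Companion
If a code sample helps anchor the diagram, a minimal client-side sketch could sit next to it. This is a sketch only, assuming the `letta-client` Python SDK; the API key and agent ID are placeholders:
```python
from letta_client import Letta

client = Letta(token="YOUR_API_KEY")  # placeholder credentials

# Send only the new message -- the server loads state, runs the
# reasoning loop, and persists every step before responding.
response = client.agents.messages.create(
    agent_id="YOUR_AGENT_ID",  # placeholder agent ID
    messages=[{"role": "user", "content": "What's the weather?"}],
)

# Each step of the loop comes back as a typed message: reasoning,
# tool call, tool return, and the final assistant reply.
for message in response.messages:
    print(message.message_type)
```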
## Usage Notes
- Use the **detailed version** for the main agents overview page
- Use the **simplified version** for the quickstart guide
- Link between the two versions

View File

@@ -0,0 +1,128 @@
# Memory Hierarchy Architecture
**Location:** Add to `fern/pages/agents/memory.mdx` replacing or expanding the current content
**What it shows:** How Letta's memory system works with in-context and out-of-context storage tiers.
## Diagram Code
```mermaid
graph TB
subgraph Context["🧠 LLM Context Window (In-Context Memory)"]
direction TB
SP[System Prompt]
MB[Memory Blocks]
RM[Recent Messages]
subgraph MemBlocks["Core Memory (Self-Editing)"]
P[👤 Persona Block<br/>Who the agent is]
H[👥 Human Block<br/>Who you are]
C1[📝 Custom Block 1<br/>Project context]
C2[📊 Custom Block 2<br/>Task state]
end
SP --> MB
MB --> MemBlocks
MB --> RM
end
subgraph External["💾 External Storage (Out-of-Context Memory)"]
direction TB
subgraph Recall["Recall Memory (Archival)"]
OLD[Older Messages<br/>Searchable by semantic similarity]
end
subgraph Data["Data Sources"]
FILES[Files & Documents<br/>PDFs, text, etc.]
ARCH[Archival Memory<br/>Facts & knowledge]
end
end
MemBlocks -->|Agent edits| MemBlocks
MemBlocks -.->|Agent searches when needed| Recall
MemBlocks -.->|Agent searches when needed| Data
RM -->|When context fills| Recall
style Context fill:#e3f2fd
style External fill:#f3e5f5
style MemBlocks fill:#fff9c4
style P fill:#c8e6c9
style H fill:#c8e6c9
style C1 fill:#ffecb3
style C2 fill:#ffecb3
classDef editableClass stroke:#4caf50,stroke-width:3px
class P,H,C1,C2 editableClass
```
## Alternative: Simpler Conceptual View
```mermaid
graph LR
subgraph Fast["⚡ Core Memory<br/>(Always in context)"]
CORE[Memory Blocks<br/>Editable by agent<br/>Always available]
end
subgraph Slow["🔍 External Memory<br/>(Retrieved when needed)"]
EXT[Conversation History<br/>Files & Documents<br/>Searchable]
end
AGENT[Agent] --> |Reads/Writes| CORE
AGENT -.-> |Searches| Slow
style Fast fill:#c8e6c9
style Slow fill:#e1bee7
```
## Memory Comparison Table
Add this table after the diagram:
```markdown
## Memory Types in Letta
| Memory Type | Location | Size | Speed | Use Case |
|------------|----------|------|-------|----------|
| **Persona Block** | In-context | ~200 tokens | Instant | Agent's identity and behavior |
| **Human Block** | In-context | ~200 tokens | Instant | User information and preferences |
| **Custom Blocks** | In-context | ~200 tokens each | Instant | Task-specific context |
| **Recent Messages** | In-context | Variable | Instant | Conversation flow |
| **Recall Memory** | Out-of-context | Unlimited | ~1-2 sec | Old conversation history |
| **Data Sources** | Out-of-context | Unlimited | ~1-2 sec | Documents and knowledge |
```
## Explanation to Add
After the diagram:
> **How memory works in Letta:**
>
> **Core Memory (In-Context)**
> - **Memory blocks** are always in the LLM's context window
> - Agents can **edit these directly** using built-in tools like `core_memory_replace`
> - Changes persist across conversations
> - Limited by context window size (~2-4KB total)
> - Think of it as "working memory" or "short-term memory"
>
> **External Memory (Out-of-Context)**
> - **Recall memory** stores older messages that don't fit in context
> - **Data sources** store files and documents you upload
> - Agents **search these** when they need information
> - Unlimited size (stored in database)
> - Retrieved via semantic similarity search
> - Think of it as "long-term memory" or "external knowledge"
>
> **Why this matters:**
> Unlike RAG systems that retrieve everything on-demand, Letta agents have a **persistent working memory** that they actively manage. This enables:
> - Personalization that improves over time
> - Task continuity across sessions
> - Contextual awareness without re-retrieving everything
> - Self-directed memory management
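## Optional Code Companion
To make the in-context/out-of-context split concrete, a short sketch could show memory blocks declared at agent creation. This is a sketch only, assuming the `letta-client` Python SDK; the model and embedding handles are placeholders:
```python
from letta_client import Letta

client = Letta(token="YOUR_API_KEY")  # placeholder credentials

agent = client.agents.create(
    model="openai/gpt-4o-mini",                 # placeholder model handle
    embedding="openai/text-embedding-3-small",  # placeholder embedding handle
    # These blocks live in-context; the agent edits them itself with
    # tools like core_memory_replace, and the edits persist.
    memory_blocks=[
        {"label": "persona", "value": "I am a concise support agent."},
        {"label": "human", "value": "Name: Sarah. Prefers short answers."},
    ],
)
# Older messages and uploaded documents stay out-of-context and are
# searched on demand rather than kept in the context window.
print(agent.id)
```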
## Usage Notes
- Use the **detailed graph** for the memory guide page
- Use the **simplified graph** for the quickstart or overview
- The table helps developers choose the right memory type

View File

@@ -0,0 +1,161 @@
# Stateful vs Stateless: Why Letta is Different
**Location:** Add to `fern/pages/concepts/letta.mdx` early in the document
**What it shows:** The fundamental difference between Letta's stateful agents and traditional stateless LLM APIs.
## Diagram Code
```mermaid
graph TB
subgraph Traditional["❌ Traditional Stateless API (e.g., ChatCompletions)"]
direction TB
U1[User/App]
API1[LLM API]
U1 -->|"Request 1:<br/>[msg1]"| API1
API1 -->|Response 1| U1
U1 -->|"Request 2:<br/>[msg1, response1, msg2]"| API1
API1 -->|Response 2| U1
U1 -->|"Request 3:<br/>[msg1, res1, msg2, res2, msg3]"| API1
API1 -->|Response 3| U1
Note1[❌ Client manages state<br/>❌ No memory persistence<br/>❌ Conversation grows linearly<br/>❌ Context window fills quickly]
style Note1 fill:#ffebee,stroke:#c62828
end
subgraph Letta["✅ Letta Stateful Agents"]
direction TB
U2[User/App]
LETTA[Letta Server]
DB[(Database)]
U2 -->|"Request 1:<br/>[msg1]"| LETTA
LETTA -->|Save state| DB
LETTA -->|Response 1| U2
U2 -->|"Request 2:<br/>[msg2] only!"| LETTA
DB -->|Load state| LETTA
LETTA -->|Update state| DB
LETTA -->|Response 2| U2
U2 -->|"Request 3:<br/>[msg3] only!"| LETTA
DB -->|Load state| LETTA
LETTA -->|Update state| DB
LETTA -->|Response 3| U2
Note2[✅ Server manages state<br/>✅ Persistent memory<br/>✅ Send only new messages<br/>✅ Intelligent context mgmt]
style Note2 fill:#e8f5e9,stroke:#2e7d32
end
```
## Alternative: Side-by-Side Comparison
```mermaid
graph LR
subgraph Stateless["Stateless (OpenAI/Anthropic)"]
direction TB
C1[Client] -->|Full history every time| S1[API]
S1 -->|Response| C1
S1 -.->|No memory| VOID[ ]
style VOID fill:none,stroke:none
end
subgraph Stateful["Stateful (Letta)"]
direction TB
C2[Client] -->|New message only| S2[Agent]
S2 -->|Response| C2
S2 <-->|Persistent state| DB[(Memory)]
end
style Stateless fill:#ffebee
style Stateful fill:#e8f5e9
```
## Comparison Table
```markdown
## Key Differences
| Aspect | Traditional (Stateless) | Letta (Stateful) |
|--------|------------------------|------------------|
| **State management** | Client-side | Server-side |
| **Request format** | Send full conversation history | Send only new messages |
| **Memory** | None (ephemeral) | Persistent database |
| **Context limit** | Hard limit, then fails | Intelligent management |
| **Agent identity** | None | Each agent has unique ID |
| **Long conversations** | Expensive & brittle | Scales infinitely |
| **Personalization** | App must manage | Built-in memory blocks |
| **Multi-session** | Requires external DB | Native support |
```
## Code Comparison
### Stateless API (e.g., OpenAI)
```python
# You must send the entire conversation every time
messages = [
{"role": "user", "content": "Hello, I'm Sarah"},
{"role": "assistant", "content": "Hi Sarah!"},
{"role": "user", "content": "What's my name?"}, # ← New message
]
# Send everything
response = openai.chat.completions.create(
model="gpt-4",
messages=messages # ← Full history required
)
# You must store and manage messages yourself
messages.append(response.choices[0].message)
```
### Stateful API (Letta)
```python
# Agent already knows context
response = client.agents.messages.create(
agent_id=agent.id,
messages=[
{"role": "user", "content": "What's my name?"} # ← New message only
]
)
# Agent remembers Sarah from its memory blocks
# No need to send previous messages
```
## Explanation Text
> **Why stateful matters:**
>
> **Traditional LLM APIs are stateless** - like hitting "clear chat" after every message. Your application must:
> - Store all messages in a database
> - Send the entire conversation history with each request
> - Manage context window overflow manually
> - Implement memory/personalization logic
> - Handle session management
>
> **Letta agents are stateful services** - like persistent processes. The server:
> - Stores all agent state in its database
> - Accepts only new messages (not full history)
> - Manages context window intelligently
> - Provides built-in memory via editable blocks
> - Maintains agent identity across sessions
>
> **The result:** Instead of building a stateful layer on top of a stateless API, you get statefulness as a primitive.
## Usage Notes
This diagram should appear VERY early in the documentation, ideally:
1. On the main overview page
2. In the concepts/letta.mdx page
3. Referenced in the quickstart
It's the "aha moment" diagram that explains why Letta exists.

View File

@@ -0,0 +1,295 @@
# Letta System Architecture
**Location:** Add to `fern/pages/getting-started/letta_platform.mdx` or `fern/pages/concepts/letta.mdx`
**What it shows:** The complete Letta system with all major components and their relationships.
## Diagram Code
```mermaid
graph TB
subgraph Client["👤 Client Applications"]
PYTHON[Python SDK]
TS[TypeScript SDK]
REST[REST API]
ADE[Agent Dev Environment<br/>Web UI]
end
subgraph Server["🚀 Letta Server"]
direction TB
API[REST API Layer]
subgraph Runtime["Agent Runtime"]
LOOP[Reasoning Loop]
TOOLS[Tool Executor]
MEM[Memory Manager]
end
subgraph Services["Core Services"]
AUTH[Authentication]
QUEUE[Job Queue]
STREAM[Streaming Handler]
end
API --> Runtime
API --> Services
end
subgraph Storage["💾 Storage Layer"]
DB[(PostgreSQL/SQLite)]
VECTOR[(Vector DB<br/>pgvector)]
DB --- VECTOR
end
subgraph External["☁️ External Services"]
LLM[LLM Providers<br/>OpenAI, Anthropic,<br/>Google, etc.]
EMBED[Embedding Models<br/>OpenAI, etc.]
MCPS[MCP Servers<br/>External tools]
end
Client --> Server
Runtime --> Storage
Runtime --> External
Services --> Storage
TOOLS -.->|Optional| MCPS
style Client fill:#e3f2fd
style Server fill:#f3e5f5
style Storage fill:#fff9c4
style External fill:#e8f5e9
```
## Deployment Architecture
```mermaid
graph TB
subgraph Cloud["☁️ Letta Cloud"]
CLOUD_API[API Gateway]
CLOUD_SERVERS[Load Balanced<br/>Letta Servers]
CLOUD_DB[(Managed<br/>PostgreSQL)]
CLOUD_REDIS[(Redis Cache)]
CLOUD_API --> CLOUD_SERVERS
CLOUD_SERVERS --> CLOUD_DB
CLOUD_SERVERS --> CLOUD_REDIS
end
subgraph Self["🏠 Self-Hosted"]
DOCKER[Docker Container]
LOCAL_DB[(PostgreSQL<br/>or SQLite)]
DOCKER --> LOCAL_DB
end
subgraph Apps["Your Applications"]
WEB[Web App]
MOBILE[Mobile App]
BOT[Chatbot]
API_APP[API Service]
end
Apps --> Cloud
Apps --> Self
style Cloud fill:#e3f2fd
style Self fill:#fff9c4
```
## Data Flow Diagram
```mermaid
flowchart LR
subgraph Input
USER[User Message]
end
subgraph Processing
LOAD[Load Agent State]
CONTEXT[Build Context]
LLM[LLM Inference]
TOOLS[Execute Tools]
SAVE[Save State]
end
subgraph Output
RESPONSE[Agent Response]
end
USER --> LOAD
LOAD --> CONTEXT
CONTEXT --> LLM
LLM --> TOOLS
TOOLS --> LLM
LLM --> SAVE
SAVE --> RESPONSE
DB[(Database)] -.-> LOAD
SAVE -.-> DB
style Input fill:#e3f2fd
style Processing fill:#f3e5f5
style Output fill:#c8e6c9
```
## Component Details
```markdown
## System Components
### Client SDKs
- **Python SDK** (`letta-client`) - Full-featured client for Python applications
- **TypeScript SDK** (`@letta-ai/letta-client`) - Full-featured client for Node.js/TypeScript
- **REST API** - Direct HTTP access for any language
- **ADE (Agent Development Environment)** - Web-based UI for building and testing agents
### Letta Server
#### API Layer
- RESTful endpoints for all operations
- OpenAPI/Swagger specification
- Authentication and authorization
- Request validation
#### Agent Runtime
- **Reasoning Loop** - Manages agent execution steps
- **Tool Executor** - Runs tools in isolated sandbox
- **Memory Manager** - Handles memory block operations and recall
#### Core Services
- **Authentication** - API key management, user sessions
- **Job Queue** - Async task processing
- **Streaming Handler** - Server-sent events for real-time updates
### Storage Layer
#### Database (PostgreSQL or SQLite)
Stores:
- Agent configurations and state
- Memory blocks
- Message history
- Tools and tool definitions
- User accounts and API keys
#### Vector Database (pgvector)
Stores:
- Message embeddings for semantic search
- Document embeddings for data sources
- Enables recall memory and archival search
### External Services
#### LLM Providers
- OpenAI (GPT-4, GPT-3.5)
- Anthropic (Claude)
- Google (Gemini)
- DeepSeek, xAI, Groq, etc.
- Local providers (Ollama, LM Studio, vLLM)
#### Embedding Providers
- OpenAI embeddings
- Local embedding models
#### MCP Servers (Optional)
- External tool providers
- Connect via HTTP/SSE or stdio
- Examples: GitHub, Gmail, databases
## Deployment Options
### Letta Cloud
- Fully managed service
- Multi-tenant architecture
- Automatic scaling
- Built-in monitoring
- 99.9% uptime SLA
- Managed database and infrastructure
**Best for:**
- Quick prototyping
- Production deployments
- No infrastructure management
### Self-Hosted
- Docker container
- Full control over infrastructure
- Your own database
- Custom configuration
**Best for:**
- Data privacy requirements
- Custom infrastructure needs
- Cost optimization at scale
- Air-gapped environments
## Data Flow
1. **Request arrives** - Client sends message to API
2. **Load state** - Agent configuration and memory loaded from DB
3. **Build context** - System prompt, memory blocks, tools assembled
4. **LLM inference** - Context sent to LLM provider
5. **Tool execution** - If LLM calls tools, they execute in sandbox
6. **Iteration** - Loop continues until agent responds to user
7. **Save state** - All changes persisted to database
8. **Response** - Agent response returned to client
## Scaling Characteristics
### Horizontal Scaling
- Multiple Letta server instances behind load balancer
- Shared database for state consistency
- Redis for distributed caching (optional)
### Vertical Scaling
- Increase database resources for more agents
- More CPU/RAM for concurrent agent execution
- SSD for faster database queries
### Performance
- ~1-5 seconds average response time (depends on LLM)
- Thousands of agents per server instance
- Millions of messages stored efficiently
- Concurrent agent execution supported
```
## Architecture Decision Records
```markdown
## Why This Architecture?
### Stateful Server Design
Unlike frameworks that run in your application, Letta is a separate service:
- **Persistent identity** - Agents exist independently
- **Shared access** - Multiple clients can connect to same agents
- **State isolation** - Client logic separated from agent logic
- **Easier debugging** - Centralized state inspection
### Database-Backed
All state in PostgreSQL/SQLite:
- **Durability** - Agents survive server restarts
- **Portability** - Export agents to move between servers
- **Auditability** - Complete history preserved
- **Multi-tenancy** - Secure isolation between users
### Pluggable LLMs
Model-agnostic design:
- **Provider flexibility** - Switch between OpenAI, Anthropic, local, etc.
- **No lock-in** - Your agent data is portable
- **Cost optimization** - Use cheaper models where appropriate
- **Future-proof** - New models work without code changes
### Sandbox Tool Execution
Tools run in isolation:
- **Security** - Untrusted code can't access server
- **Resource limits** - CPU, memory, time constraints
- **Reliability** - One tool crash doesn't kill agent
- **Debugging** - Tool failures are captured and logged
```
## Usage Notes
- Place the **main architecture diagram** on the platform overview page
- Use the **deployment diagram** in the self-hosting guide
- The **data flow diagram** helps debug issues
- The explanation text clarifies why Letta is architected this way

View File

@@ -0,0 +1,214 @@
# Tool Execution Lifecycle
**Location:** Add to `fern/pages/agents/tools.mdx` near the beginning
**What it shows:** How tools are registered, called by agents, executed, and return results.
## Diagram Code
```mermaid
sequenceDiagram
participant Dev as Developer
participant Server as Letta Server
participant Agent as Agent Runtime
participant LLM
participant Sandbox as Tool Sandbox
Note over Dev,Server: 1. Tool Registration
Dev->>Server: Create tool from function
Note over Dev,Server: def my_tool(arg: str) -> str:<br/> """Tool description"""<br/> return result
Server->>Server: Parse docstring
Server->>Server: Generate JSON schema
Note over Server: {<br/> "name": "my_tool",<br/> "parameters": {...}<br/>}
Server->>Server: Store in database
Note over Dev,Server: 2. Attach to Agent
Dev->>Server: Attach tool to agent
Server->>Agent: Update agent config
rect rgb(240, 248, 255)
Note over Agent,Sandbox: 3. Runtime Execution
Agent->>LLM: Send prompt + tools
Note over LLM: Available tools in context
LLM-->>Agent: Tool call decision
Note over Agent: {<br/> "name": "my_tool",<br/> "arguments": {"arg": "value"}<br/>}
Agent->>Agent: Validate arguments
Agent->>Agent: Save tool_call_message
Agent->>Sandbox: Execute in sandbox
Note over Sandbox: Isolated execution<br/>Resource limits applied
Sandbox-->>Agent: Return result
Agent->>Agent: Save tool_return_message
Agent->>LLM: Continue with result
Note over Agent,LLM: Result added to context
end
```
## Alternative: Simplified Flow
```mermaid
flowchart TD
Start([User message]) --> Think{Agent thinks}
Think -->|Need information| Tool[Call tool]
Think -->|Can respond| End([Send message])
Tool --> Execute[Execute in sandbox]
Execute --> Result[Get result]
Result --> Think
style Tool fill:#fff9c4
style Execute fill:#e1bee7
style Result fill:#c8e6c9
```
## Tool Types Diagram
```mermaid
graph TB
subgraph Built-in["🔧 Built-in Tools"]
MEM[Memory Tools<br/>edit_memory, etc.]
SEND[send_message<br/>Respond to user]
SEARCH[web_search<br/>Search internet]
CODE[run_code<br/>Execute code]
end
subgraph Custom["⚙️ Custom Tools"]
PYTHON[Python Functions<br/>Your code]
MCP[MCP Tools<br/>External servers]
COMP[Composio Tools<br/>SaaS integrations]
end
Agent[Agent] --> Built-in
Agent --> Custom
style Built-in fill:#e3f2fd
style Custom fill:#fff9c4
```
## Explanation to Add
````markdown
## How Tools Work
### 1. Tool Registration
When you create a tool, Letta:
- Parses your function signature and docstring
- Generates an OpenAI-compatible JSON schema
- Stores the tool code and schema in the database
Example:
```python
def search_database(query: str) -> list:
"""
Search the product database.
Args:
query (str): Search query
Returns:
list: Matching products
"""
# Your implementation
return results
```
Becomes:
```json
{
"name": "search_database",
"description": "Search the product database.",
"parameters": {
"type": "object",
"properties": {
"query": {"type": "string", "description": "Search query"}
},
"required": ["query"]
}
}
```
### 2. Tool Context
When an agent processes a message:
- All attached tool schemas are included in the LLM context
- The LLM decides whether to call a tool or respond directly
- The LLM generates structured tool call arguments
### 3. Execution
When the agent calls a tool:
- **Arguments are validated** against the schema
- **Tool is executed** in an isolated sandbox (for security)
- **Result is returned** and added to the agent's context
- **Agent continues thinking** with the new information
### 4. Security
Tools run in a sandbox with:
- **Resource limits** (CPU, memory, time)
- **Isolated environment** (can't access other agents or server)
- **Restricted imports** (configurable)
- **Execution timeout** (prevents infinite loops)
### 5. Tool Types
#### Memory Tools (Built-in, Always Attached)
- `core_memory_append` - Add to memory block
- `core_memory_replace` - Update memory block
- `archival_memory_insert` - Store long-term facts
- `archival_memory_search` - Retrieve facts
- `conversation_search` - Search message history
#### Communication Tools (Built-in, Default)
- `send_message` - Respond to the user
#### Utility Tools (Built-in, Optional)
- `web_search` - Search the web (Letta Cloud includes credits)
- `run_code` - Execute code in multiple languages
#### Custom Tools
- **Python functions** - Your own code
- **MCP tools** - Connect to MCP servers
- **Composio tools** - Pre-built SaaS integrations
## Tool Call Flow Example
```
User: "What's the weather in SF?"
Agent thinks: "I need weather data"
Agent calls: web_search("weather san francisco")
Tool executes: Returns "Sunny, 72°F"
Agent thinks: "I have the information"
Agent calls: send_message("It's sunny and 72°F in San Francisco!")
User receives: "It's sunny and 72°F in San Francisco!"
```
## Tool Best Practices
1. **Clear descriptions** - The LLM relies on these to decide when to call tools
2. **Typed arguments** - Use type hints for automatic schema generation
3. **Error handling** - Return informative error messages
4. **Idempotency** - Tools may be called multiple times
5. **Performance** - Keep tool execution fast (< 5 seconds)
````
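## Optional Code Companion
As a companion to the best-practices list above, a short sketch of a tool that follows them could be added: typed arguments, a clear docstring, and informative errors. The function and its backing data are hypothetical:
```python
def get_order_status(order_id: str) -> str:
    """
    Look up the shipping status of an order.

    Args:
        order_id (str): The order identifier, e.g. "ORD-1234".

    Returns:
        str: A short human-readable status, or an error message.
    """
    # Hypothetical backing data; a real tool would query your own system.
    orders = {"ORD-1234": "shipped", "ORD-5678": "processing"}
    status = orders.get(order_id)
    if status is None:
        # Informative errors let the LLM recover instead of retrying blindly.
        return f"No order found with id '{order_id}'. Check the id and try again."
    return f"Order {order_id} is {status}."
```
Because the signature is typed and the docstring documents each argument, schema generation (step 1 above) can produce the JSON schema shown earlier without extra configuration.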
## Usage Notes
- Place the **sequence diagram** early in the tools documentation
- Use the **simplified flow** in the quickstart
- The **tool types diagram** helps users understand what's available
- The explanation clarifies the "magic" of tool execution

View File

@@ -3,6 +3,8 @@ instances:
custom-domain: https://docs.letta.com
title: Letta
default-language: typescript
experimental:
openapi-parser-v3: true
@@ -72,6 +74,8 @@ navigation:
path: pages/getting-started/letta_platform.mdx
- page: Quickstart
path: pages/getting-started/quickstart.mdx
- page: Core Concepts
path: pages/getting-started/core-concepts.mdx
- page: Prompts for Vibecoding
path: pages/getting-started/prompts.mdx
#- section: Supported Frameworks
@@ -91,7 +95,7 @@ navigation:
# - page: Mastra
# path: pages/frameworks/mastra.mdx
- section: Stateful Agents
- section: Agents
contents:
- page: Overview
path: pages/agents/overview.mdx
@@ -100,8 +104,6 @@ navigation:
contents:
- page: MemGPT Agents
path: pages/agents/memgpt_agents.mdx
- page: Sleep-time Agents
path: pages/agents/sleep_time_agents.mdx
- page: Low-latency (voice) Agents
path: pages/agents/low_latency_agents.mdx
- page: ReAct Agents
@@ -112,49 +114,38 @@ navigation:
path: pages/agents/stateful_workflows.mdx
- page: Context Hierarchy
path: pages/agents/context_hierarchy.mdx
- page: Message Types
path: pages/agents/message_types.mdx
- page: Heartbeats
path: pages/agents/heartbeats.mdx
- section: Memory
path: pages/agents/memory.mdx
- section: Memory
path: pages/agents/memory.mdx
contents:
- page: Memory Blocks
path: pages/agents/memory_blocks.mdx
- section: Archival Memory
path: pages/agents/archival_memory_overview.mdx
contents:
- page: Memory Blocks
path: pages/agents/memory_blocks.mdx
- page: Agentic Context Engineering
path: pages/agents/context_engineering.mdx
- page: Filesystem
path: pages/agents/filesystem.mdx
- page: Searching & Querying
path: pages/agents/archival_search.mdx
- page: Best Practices
path: pages/agents/archival_best_practices.mdx
- page: Agentic Context Engineering
path: pages/agents/context_engineering.mdx
- section: Agent Capabilities
contents:
- page: Streaming Responses
path: pages/agents/streaming.mdx
- page: Long-Running Executions
path: pages/agents/long_running.mdx
- page: JSON Mode & Structured Output
path: pages/agents/json_mode.mdx
- page: Human-in-the-Loop
path: pages/agents/human_in_the_loop.mdx
- page: Multi-Modal
path: pages/agents/multimodal.mdx
- section: Multi-Agent
path: pages/agents/multiagent.mdx
contents:
- page: Custom Multi-Agent Tools
path: pages/agents/multiagent_custom.mdx
- page: Multi-Agent Shared Memory
path: pages/agents/multiagent_memory.mdx
- page: Groups
path: pages/agents/groups.mdx
- page: Multi-User (Identities)
path: pages/agents/multiuser.mdx
- page: Agent File (.af)
path: pages/agents/agentfile.mdx
- page: Scheduling
path: pages/agents/scheduling.mdx
- section: Voice Agents
path: pages/voice/voice.mdx
contents:
- page: Connecting to LiveKit Agents
path: pages/voice/voice_livekit.mdx
- page: Connecting to Vapi
path: pages/voice/voice_vapi.mdx
- page: Filesystem
path: pages/agents/filesystem.mdx
- section: Tool Use
contents:
@@ -168,9 +159,6 @@ navigation:
path: pages/agents/tool_rules.mdx
- page: Tool Variables
path: pages/agents/tool_variables.mdx
- page: Composio Integration
path: pages/agents/composio.mdx
hidden: true
- section: Model Context Protocol
path: pages/mcp/overview.mdx
contents:
@@ -181,6 +169,51 @@ navigation:
- page: Local (stdio) Servers
path: pages/mcp/stdio.mdx
- section: Configuration
contents:
- page: Multi-User (Identities)
path: pages/agents/multiuser.mdx
- page: Agent File (.af)
path: pages/agents/agentfile.mdx
- page: Scheduling
path: pages/agents/scheduling.mdx
- section: Multi-Agent
path: pages/agents/multiagent.mdx
contents:
- page: Custom Multi-Agent Tools
path: pages/agents/multiagent_custom.mdx
- page: Multi-Agent Shared Memory
path: pages/agents/multiagent_memory.mdx
- section: Experimental
contents:
- page: Groups
path: pages/agents/groups.mdx
- page: Human-in-the-Loop
path: pages/agents/human_in_the_loop.mdx
- page: Sleep-time Agents
path: pages/agents/sleep_time_agents.mdx
- section: Voice Agents
path: pages/voice/voice.mdx
contents:
- page: Connecting to LiveKit Agents
path: pages/voice/voice_livekit.mdx
- page: Connecting to Vapi
path: pages/voice/voice_vapi.mdx
- section: Integrations
contents:
- page: Telegram Bot
path: pages/tutorials/telegram_bot.mdx
- section: Research Background
contents:
- page: Letta and MemGPT
path: pages/concepts/letta.mdx
- page: MemGPT Paper Deep Dive
path: pages/concepts/memgpt.mdx
#- section: Tool Execution
# contents:
# - page: Overview
@@ -249,12 +282,6 @@ navigation:
# path: pages/cloud/variables.mdx
# - page: Versioning
# path: pages/cloud/versions.mdx
- section: Key Concepts
contents:
- page: Letta concepts
path: pages/concepts/letta.mdx
- page: MemGPT concepts
path: pages/concepts/memgpt.mdx
- section: Additional Resources
contents:
- page: Letta Desktop Troubleshooting
@@ -582,6 +609,10 @@ navigation:
contents:
- page: Async Multi-Agent
path: pages/tutorials/multiagent_async.mdx
- section: Agent Templates
contents:
- page: Building customer-specific relationship agents
path: pages/tutorials/customer-specific-agents.mdx
- tab: leaderboard
layout:

View File

@@ -1,4 +1,4 @@
{
"organization": "letta",
"version": "0.77.3"
"version": "0.83.0"
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 73 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 52 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 125 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 131 KiB

BIN
fern/images/attach-tool.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 64 KiB

BIN
fern/images/exa-api.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

BIN
fern/images/exa-tools.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 154 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 174 KiB

BIN
fern/images/gmail-tools.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

BIN
fern/images/mcp-options.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 155 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 104 KiB

BIN
fern/images/zap-new-mcp.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 51 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 111 KiB

View File

@@ -1127,6 +1127,9 @@ paths:
/v1/_internal_templates/blocks:
post:
x-fern-ignore: true
/v1/_internal_templates/blocks/batch:
post:
x-fern-ignore: true
/v1/projects:
get:
x-fern-sdk-group-name:

File diff suppressed because it is too large Load Diff

View File

@@ -6,6 +6,7 @@
"prepare-openapi": "ts-node ./scripts/prepare-openapi.ts"
},
"dependencies": {
"fern-api": "^0.83.0",
"ts-node": "^10.9.2",
"typescript": "^5.3.3"
}

View File

@@ -66,20 +66,7 @@ Upload downloaded `.af` files directly through the ADE interface to easily re-cr
</Frame>
<CodeGroup>
```python title="python" maxLines=50
# Install SDK with `pip install letta-client`
from letta_client import Letta
# Create a client to connect to Letta
client = Letta(token="LETTA_API_KEY")
# Import your .af file from any location
agent_state = client.agents.import_agent_serialized(file=open("/path/to/agent/file.af", "rb"))
print(f"Imported agent: {agent_state.id}")
```
```typescript title="node.js" maxLines=50
```typescript TypeScript maxLines=50
// Install SDK with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
import { readFileSync } from 'fs';
@@ -95,6 +82,19 @@ const agentState = await client.agents.importAgentSerialized(file, {})
console.log(`Imported agent: ${agentState.id}`);
```
```python title="python" maxLines=50
# Install SDK with `pip install letta-client`
from letta_client import Letta
# Create a client to connect to Letta
client = Letta(token="LETTA_API_KEY")
# Import your .af file from any location
agent_state = client.agents.import_agent_serialized(file=open("/path/to/agent/file.af", "rb"))
print(f"Imported agent: {agent_state.id}")
```
```curl curl
curl -X POST "https://app.letta.com/v1/agents/import" \
-H "Authorization: Bearer LETTA_API_KEY" \
@@ -111,6 +111,17 @@ You can export your own `.af` files to share by selecting "Export Agent" in the
</Frame>
<CodeGroup>
```typescript TypeScript maxLines=50
// Install SDK with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
// Create a client to connect to Letta
const client = new LettaClient({ token: "LETTA_API_KEY" });
// Export your agent into a serialized schema object (which you can write to a file)
const schema = await client.agents.exportAgentSerialized("<AGENT_ID>");
```
```python title="python" maxLines=50
# Install SDK with `pip install letta-client`
from letta_client import Letta
@@ -122,17 +133,6 @@ client = Letta(token="LETTA_API_KEY")
schema = client.agents.export_agent_serialized(agent_id="<AGENT_ID>")
```
```typescript title="node.js" maxLines=50
// Install SDK with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
// Create a client to connect to Letta
const client = new LettaClient({ token: "LETTA_API_KEY" });
// Export your agent into a serialized schema object (which you can write to a file)
const schema = await client.agents.exportAgentSerialized("<AGENT_ID>");
```
```curl curl
curl -X GET "https://app.letta.com/v1/agents/{AGENT_ID}/export" \
-H "Authorization: Bearer LETTA_API_KEY"

View File

@@ -0,0 +1,363 @@
---
title: Best Practices
subtitle: Patterns, pitfalls, and advanced usage
slug: guides/agents/archival-best-practices
---
## Backfilling archives
You can pre-load archival memory with existing knowledge:
<CodeGroup>
```typescript TypeScript
// Load company policies
const policies = [
"All replicants must undergo Voight-Kampff testing upon arrival",
"Blade Runner units are authorized to retire rogue replicants",
"Tyrell Corporation employees must report suspected replicants immediately"
];
for (const policy of policies) {
await client.agents.passages.insert(agent.id, {
content: policy,
tags: ["policy", "company", "protocol"]
});
}
// Load technical documentation
const docs = [
{
content: "Nexus-6 replicants: Superior strength, agility, and intelligence. Four-year lifespan prevents emotional development.",
tags: ["technical", "nexus-6", "specifications"]
},
{
content: "Voight-Kampff test: Measures capillary dilation, blush response, and pupil dilation to detect replicants.",
tags: ["technical", "testing", "voight-kampff"]
}
];
for (const doc of docs) {
await client.agents.passages.insert(agent.id, {
content: doc.content,
tags: doc.tags
});
}
```
```python Python
# Load company policies
policies = [
"All replicants must undergo Voight-Kampff testing upon arrival",
"Blade Runner units are authorized to retire rogue replicants",
"Tyrell Corporation employees must report suspected replicants immediately"
]
for policy in policies:
client.agents.passages.insert(
agent_id=agent.id,
content=policy,
tags=["policy", "company", "protocol"]
)
# Load technical documentation
docs = [
{
"content": "Nexus-6 replicants: Superior strength, agility, and intelligence. Four-year lifespan prevents emotional development.",
"tags": ["technical", "nexus-6", "specifications"]
},
{
"content": "Voight-Kampff test: Measures capillary dilation, blush response, and pupil dilation to detect replicants.",
"tags": ["technical", "testing", "voight-kampff"]
}
]
for doc in docs:
client.agents.passages.insert(
agent_id=agent.id,
content=doc["content"],
tags=doc["tags"]
)
```
</CodeGroup>
**Use cases for backfilling:**
- Migrating knowledge bases to Letta
- Seeding specialized agents with domain knowledge
- Loading historical conversation logs
- Importing research libraries
## Enforcing archival usage with tool rules
If your agent forgets to use archival memory, you should first try prompting the agent to use it more consistently. If prompting alone doesn't work, you can enforce archival usage with [tool rules](/guides/agents/tool-rules).
**Force archival search at turn start:**
<CodeGroup>
```typescript TypeScript
await client.agents.update(agent.id, {
toolRules: [
{ type: "init", toolName: "archival_memory_search" }
]
});
```
```python Python
from letta_client.types import InitToolRule
client.agents.update(
agent_id=agent.id,
tool_rules=[
InitToolRule(tool_name="archival_memory_search")
]
)
```
</CodeGroup>
**Require archival insertion before exit:**
<CodeGroup>
```typescript TypeScript
await client.agents.update(agent.id, {
toolRules: [
{
type: "child",
toolName: "send_message",
children: ["archival_memory_insert"]
}
]
});
```
```python Python
from letta_client.types import ChildToolRule
client.agents.update(
agent_id=agent.id,
tool_rules=[
ChildToolRule(
tool_name="send_message",
children=["archival_memory_insert"]
)
]
)
```
</CodeGroup>
<Info>
**Using the ADE:** Tool rules can also be configured in the Agent Development Environment's Tool Manager interface.
</Info>
<Warning>
**Note:** Anthropic models don't support strict structured output, so tool rules may not be enforced. Use OpenAI or Gemini models for guaranteed tool rule compliance.
</Warning>
**When to use tool rules:**
- Knowledge management agents that should always search context
- Agents that need to learn from every interaction
- Librarian/archivist agents focused on information storage
**Latency considerations:** Forcing archival search adds a tool call at the start of every turn. For latency-sensitive applications (like customer support), consider making archival search optional.
[Learn more about tool rules →](/guides/agents/tool-rules)
## Best practices
**1. Avoid over-insertion**
The most common pitfall is inserting too many memories, creating clutter. Trust the agent to decide what's worth storing long-term.
**2. Create an archival policies block**
Help your agent learn how to use archival memory effectively by creating a dedicated memory block for archival usage policies:
<CodeGroup>
```typescript TypeScript
await client.blocks.create({
label: "archival_policies",
value: `
When to insert into archival:
- User preferences and important facts about the user
- Technical specifications and reference information
- Significant decisions or outcomes from conversations
When NOT to insert:
- Temporary conversational context
- Information already stored
- Trivial details or pleasantries
Search strategies:
- Use natural language questions for best results
- Include tags when filtering by category
- Try semantic variations if first search doesn't find what you need
`
});
```
```python Python
client.blocks.create(
label="archival_policies",
value="""
When to insert into archival:
- User preferences and important facts about the user
- Technical specifications and reference information
- Significant decisions or outcomes from conversations
When NOT to insert:
- Temporary conversational context
- Information already stored
- Trivial details or pleasantries
Search strategies:
- Use natural language questions for best results
- Include tags when filtering by category
- Try semantic variations if first search doesn't find what you need
"""
)
```
</CodeGroup>
You can improve this block through conversation with your agent:
> **You:** "I noticed you didn't store the fact that I prefer TypeScript for backend development. Update your archival policies block to ensure you capture language preferences in the future."
> **Agent:** Updates the archival_policies block to include "Programming language preferences" under "When to insert into archival"
This collaborative approach helps agents learn from mistakes and improve their archival memory usage over time.
**3. Track query effectiveness**
Build self-improving agents by having them track archival search effectiveness in a memory block. This allows agents to learn which query patterns work best and refine their search strategies over time.
<CodeGroup>
```typescript TypeScript
// Create a memory block for tracking
await client.blocks.create({
label: "archival_tracking",
value: `
Query patterns: Natural language questions work best
Recent searches: "test procedures" (3 results), "replicant specs" (5 results)
Success rate: ~85% of searches return relevant results
Frequently searched topics: [technical specifications, protocols, case histories]
Common patterns: Queries about technical specs work better than vague questions
Improvements needed: Add more tags for better filtering
`
});
```
```python Python
# Create a memory block for tracking
client.blocks.create(
label="archival_tracking",
value="""
Query patterns: Natural language questions work best
Recent searches: "test procedures" (3 results), "replicant specs" (5 results)
Success rate: ~85% of searches return relevant results
Frequently searched topics: [technical specifications, protocols, case histories]
Common patterns: Queries about technical specs work better than vague questions
Improvements needed: Add more tags for better filtering
"""
)
```
</CodeGroup>
The agent can update this block based on search results and continuously refine its archival strategy.
**4. Let agents experiment**
Agents can test different query styles to understand what works:
<CodeGroup>
```typescript TypeScript
// Agent tries variations
await archivalMemorySearch({query: "How does the Voight-Kampff test work?"})
await archivalMemorySearch({query: "Voight-Kampff procedure"})
await archivalMemorySearch({query: "replicant detection method"})
```
```python Python
# Agent tries variations
archival_memory_search(query="How does the Voight-Kampff test work?")
archival_memory_search(query="Voight-Kampff procedure")
archival_memory_search(query="replicant detection method")
```
</CodeGroup>
**Important:** Have the agent persist learnings from experimentation in a memory block (like `archival_tracking` or `archival_policies`), not in archival itself (avoid meta-clutter).
**5. Use tags consistently**
Establish a tag taxonomy and stick to it. Good language models typically handle tagging well.
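For example, you might fix a small taxonomy up front and draw insertion tags only from it (a sketch; the taxonomy itself is illustrative):
```python
# A fixed taxonomy that both your code and the agent's prompts reference
TAG_TAXONOMY = {
    "category": ["technical", "policy", "user_info"],
    "subject": ["replicant", "testing", "tyrell"],
}
client.agents.passages.insert(
    agent_id=agent.id,
    content="Rachael is an experimental replicant with implanted memories",
    tags=["technical", "replicant"],  # only values drawn from the taxonomy
)
```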
**6. Add context to insertions**
❌ Don't: "Likes replicants"
✅ Do: "Deckard shows unusual empathy toward replicants, particularly Rachael, suggesting possible replicant identity"
**7. Pre-load domain knowledge**
For specialized agents, seed archival with relevant information upfront via backfilling.
**8. Consider latency**
Forced archival search adds overhead. For real-time applications, make it optional or use it selectively.
## Modifying archival memories (SDK only)
While agents cannot modify archival memories, developers can update or delete them via the SDK:
<CodeGroup>
```typescript TypeScript
// Update a memory
await client.agents.passages.update(agent.id, passage.id, {
content: "Updated content",
tags: ["new", "tags"]
});
// Delete a memory
await client.agents.passages.delete(agent.id, passage.id);
```
```python Python
# Update a memory
client.agents.passages.update(
agent_id=agent.id,
passage_id=passage.id,
content="Updated content",
tags=["new", "tags"]
)
# Delete a memory
client.agents.passages.delete(
agent_id=agent.id,
passage_id=passage.id
)
```
</CodeGroup>
This allows you to:
- Fix incorrect information
- Update outdated facts
- Remove sensitive or irrelevant data
- Reorganize tag structures
## Next steps
<CardGroup cols={2}>
<Card
title="Searching & Querying"
href="/guides/agents/archival-search"
>
Learn how to search archival memory effectively
</Card>
<Card
title="Archival Memory Overview"
href="/guides/agents/archival-memory"
>
Back to archival memory overview
</Card>
<Card
title="Memory Blocks"
href="/guides/agents/memory-blocks"
>
Learn about always-visible memory
</Card>
<Card
title="Tool Rules"
href="/guides/agents/tool-rules"
>
Advanced tool execution constraints
</Card>
</CardGroup>

View File

@@ -0,0 +1,193 @@
---
title: Archival Memory
subtitle: Long-term semantic storage for agent knowledge
slug: guides/agents/archival-memory
---
## What is archival memory?
Archival memory is a semantically searchable database where agents store facts, knowledge, and information for long-term retrieval. Unlike memory blocks that are always visible, archival memory is queried on-demand when relevant.
**Key characteristics:**
- **Agent-immutable** - Agents cannot easily modify or delete archival memories (though developers can via SDK)
- **Unlimited storage** - No practical size limits
- **Semantic search** - Find information by meaning, not exact keywords
- **Tagged organization** - Agents can categorize memories with tags
**Best for:** Event descriptions, reports, articles, historical records, and reference material that doesn't change frequently.
## When to use archival memory
**Use archival memory for:**
- Document repositories (API docs, technical guides, research papers)
- Conversation logs beyond the context window
- Customer interaction history and support tickets
- Reports, articles, and written content
- Code examples and technical references
- Training materials and educational content
- User research data and feedback
- Historical records and event logs
**Don't use archival memory for:**
- Information that should always be visible → Use memory blocks
- Frequently changing state → Use memory blocks
- Current working memory → Use scratchpad blocks
- Information that needs frequent modification → Use memory blocks
## How agents interact with archival memory
Agents have two primary tools for archival memory: `archival_memory_insert` and `archival_memory_search`.
### Inserting information
Agents can insert memories during conversations:
<CodeGroup>
```typescript TypeScript
// Agent inserts after learning something
archival_memory_insert(
content: "Deckard retired six replicants in the off-world colonies before returning to Los Angeles",
tags: ["replicant", "history", "retirement"]
)
```
```python Python
# Agent inserts after learning something
archival_memory_insert(
content="Deckard retired six replicants in the off-world colonies before returning to Los Angeles",
tags=["replicant", "history", "retirement"]
)
```
</CodeGroup>
Developers can also insert programmatically:
<CodeGroup>
```typescript TypeScript
await client.agents.passages.insert(agent.id, {
content: "The Tyrell Corporation's motto: 'More human than human'",
tags: ["company", "motto", "tyrell"]
});
```
```python Python
client.agents.passages.insert(
agent_id=agent.id,
content="The Tyrell Corporation's motto: 'More human than human'",
tags=["company", "motto", "tyrell"]
)
```
</CodeGroup>
### Searching for information
<CodeGroup>
```typescript TypeScript
// Agent searches semantically
const results = archival_memory_search(
query: "replicant lifespan",
tags: ["technical"], // Optional: filter by tags
page: 0
)
```
```python Python
# Agent searches semantically
results = archival_memory_search(
query="replicant lifespan",
tags=["technical"], # Optional: filter by tags
page=0
)
```
</CodeGroup>
Results are ranked by **semantic relevance**: the search matches concepts and meaning, not just exact keywords. For example, searching for "artificial memories" will find "implanted memories" even though the exact words don't match.
[Learn more about search and querying →](/guides/agents/archival-search)
## Real-world examples
### Example 1: Personal knowledge manager
An agent with 30k+ archival memories tracking:
- Personal preferences and history
- Technical learnings and insights
- Article summaries and research notes
- Conversation highlights
### Example 2: Social media agent
An agent with 32k+ memories tracking interactions:
- User preferences and conversation history
- Common topics and interests
- Interaction patterns and communication styles
- Tags by user, topic, and interaction type
### Example 3: Customer support agent
- Stores ticket resolutions and common issues
- Tags by product, issue type, priority
- Searches archival for similar past issues
- Learns from successful resolutions over time
### Example 4: Research assistant
- Stores paper summaries with key findings
- Tags by topic, methodology, author
- Cross-references related research
- Builds a semantic knowledge graph
## Archival memory vs other memory types
| Feature | Memory Blocks | Archival Memory | Conversation Search |
|---------|--------------|-----------------|-------------------|
| **Always visible** | ✅ Yes | ❌ No (searched) | ❌ No (searched) |
| **Search type** | N/A | Semantic | Full-text + semantic |
| **Storage limit** | Character limit | Unlimited | Unlimited |
| **Agent modifiable** | ✅ Full edit control | ❌ Insert + search only | ❌ Search only |
| **SDK modifiable** | ✅ Yes | ✅ Yes | ❌ No |
| **Use case** | Current state | Long-term facts | Past messages |
| **Best for** | Active context | Historical records | Conversation history |
### When to use archival vs conversation search
<Tip>
**Archival memory** is for **intentional** storage:
- Agents decide what's worth remembering long-term
- Used for facts, knowledge, and reference material
- Curated by the agent through active insertion
**Conversation search** is for **historical** retrieval:
- Searches through actual past messages
- Used to recall what was said in previous conversations
- Automatic - no agent curation needed
**Example:**
- User says: "I prefer Python for data science projects"
- **Archival:** Agent inserts "User prefers Python for data science" as a fact
- **Conversation search:** Agent can search for the original message later
Use archival for structured knowledge, conversation search for historical context.
</Tip>
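Side by side, the two approaches might look like this from the agent's perspective (a sketch; `conversation_search` takes a free-text query, and the exact argument names are illustrative):
```python
# Intentional storage: curate a durable fact for long-term recall
archival_memory_insert(
    content="User prefers Python for data science projects",
    tags=["user_info", "preferences"]
)
# Historical retrieval: find what was actually said in past messages
conversation_search(query="prefer Python for data science")
```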
## Next steps
<CardGroup cols={2}>
<Card
title="Searching & Querying"
href="/guides/agents/archival-search"
>
Learn how to write effective queries and filter results
</Card>
<Card
title="Best Practices"
href="/guides/agents/archival-best-practices"
>
Patterns, pitfalls, and advanced usage
</Card>
<Card
title="Memory Blocks"
href="/guides/agents/memory-blocks"
>
Learn about always-visible memory
</Card>
<Card
title="Agent Memory Overview"
href="/guides/agents/memory"
>
Understand Letta's memory system
</Card>
</CardGroup>

View File

@@ -0,0 +1,264 @@
---
title: Searching & Querying
subtitle: How to search archival memory effectively
slug: guides/agents/archival-search
---
## Search result format
<Info>
**What agents receive:** Each result contains:
- `content` - The stored text
- `tags` - Associated tags
- `timestamp` - When the memory was created
- `relevance` - Scoring with `rrf_score`, `vector_rank`, `fts_rank`
Letta uses **hybrid search** combining semantic (vector) and keyword (full-text) search, ranked using Reciprocal Rank Fusion (RRF). Higher `rrf_score` means more relevant.
</Info>
## Writing effective queries
Letta uses OpenAI's `text-embedding-3-small` model, which handles natural language questions well. Agents can use various query styles:
**Natural language questions work best:**
<CodeGroup>
```typescript TypeScript
await archivalMemorySearch({query: "How does the test work?"})
// Returns: "The Voight-Kampff test measures involuntary emotional responses..."
```
```python Python
archival_memory_search(query="How does the test work?")
# Returns: "The Voight-Kampff test measures involuntary emotional responses..."
```
</CodeGroup>
**Keywords also work:**
<CodeGroup>
```typescript TypeScript
await archivalMemorySearch({query: "replicant lifespan"})
// Returns memories containing both keywords and semantically related concepts
```
```python Python
archival_memory_search(query="replicant lifespan")
# Returns memories containing both keywords and semantically related concepts
```
</CodeGroup>
**Concept-based queries leverage semantic understanding:**
<CodeGroup>
```typescript TypeScript
await archivalMemorySearch({query: "artificial memories"})
// Returns: "...experimental replicant with implanted memories..."
// (semantic match despite different terminology)
```
```python Python
archival_memory_search(query="artificial memories")
# Returns: "...experimental replicant with implanted memories..."
# (semantic match despite different terminology)
```
</CodeGroup>
<Tip>
**Pagination:** Agents receive multiple results per search. If an agent doesn't paginate correctly, you can instruct it to adjust the `page` parameter or remind it to iterate through results.
</Tip>
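A minimal pagination loop looks like this (agent-side sketch; it assumes an out-of-range page returns no results, and `handle` is a placeholder for whatever the agent does with each hit):
```python
page = 0
while True:
    results = archival_memory_search(query="replicant cases", page=page)
    if not results:
        break  # no more pages
    for result in results:
        handle(result)  # placeholder
    page += 1
```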
## Filtering by time
Agents can search by date ranges:
<CodeGroup>
```typescript TypeScript
// Recent memories
await archivalMemorySearch({
query: "test results",
startDatetime: "2025-09-29T00:00:00"
})
// Specific time window
await archivalMemorySearch({
query: "replicant cases",
startDatetime: "2025-09-29T00:00:00",
endDatetime: "2025-09-30T23:59:59"
})
```
```python Python
# Recent memories
archival_memory_search(
query="test results",
start_datetime="2025-09-29T00:00:00"
)
# Specific time window
archival_memory_search(
query="replicant cases",
start_datetime="2025-09-29T00:00:00",
end_datetime="2025-09-30T23:59:59"
)
```
</CodeGroup>
<Info>
**Agent datetime awareness:**
- Agents know the current day but not the current time
- Agents can see timestamps of messages they've received
- Agents cannot control insertion timestamps (automatic)
- Developers can backdate memories via SDK with `created_at`
- Time filtering enables queries like "what did we discuss last week?"
</Info>
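For example, backdating a migrated memory might look like this (a sketch; `created_at` is the documented knob, while the timestamp and the parameter's placement are illustrative):
```python
client.agents.passages.insert(
    agent_id=agent.id,
    content="Case closed: four replicants retired in November 2019",
    tags=["case", "history"],
    created_at="2019-11-01T00:00:00Z"  # backdate so time filters place it correctly
)
```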
## Tags and organization
Tags help agents organize and filter archival memories. **Agents always know what tags exist in their archive** since tag lists are compiled into the context window.
**Common tag patterns:**
- `user_info`, `professional`, `personal_history`
- `documentation`, `technical`, `reference`
- `conversation`, `milestone`, `event`
- `company_policy`, `procedure`, `guideline`
**Tag search modes:**
- Match any tag
- Match all tags
- Filter by date ranges
Example of organized tagging:
<CodeGroup>
```typescript TypeScript
// Atomic memory with precise tags
await archivalMemoryInsert({
content: "Nexus-6 replicants have a four-year lifespan",
tags: ["technical", "replicant", "nexus-6"]
})
// Later, easy retrieval
await archivalMemorySearch({
query: "how long do replicants live",
tags: ["technical"]
})
```
```python Python
# Atomic memory with precise tags
archival_memory_insert(
content="Nexus-6 replicants have a four-year lifespan",
tags=["technical", "replicant", "nexus-6"]
)
# Later, easy retrieval
archival_memory_search(
query="how long do replicants live",
tags=["technical"]
)
```
</CodeGroup>
## Performance and scale
<Info>
Archival memory has no practical size limits and remains fast at scale:
**Letta Cloud:** Uses [TurboPuffer](https://turbopuffer.com/) for extremely fast semantic search, even with hundreds of thousands of memories.
**Self-hosted:** Uses pgvector (PostgreSQL) for vector search. Performance scales well with proper indexing.
**Letta Desktop:** Uses SQLite with vector search extensions. Suitable for personal use cases.
No matter the backend, archival memory scales to large archives without performance degradation.
</Info>
## Embedding models and search quality
Archival search quality depends on the agent's embedding model:
**Letta Cloud:** All agents use `text-embedding-3-small`, which is optimized for most use cases. This model cannot be changed.
**Self-hosted:** Embedding model is pinned to the agent at creation. The default `text-embedding-3-small` is sufficient for nearly all use cases.
### Changing embedding models (self-hosted only)
To change an agent's embedding model, you must complete the following steps (see the sketch below):
1. List and export all archival memories
2. Delete all archival memories
3. Update the agent's embedding model
4. Re-insert all memories (they'll be re-embedded)
<Warning>
Changing embedding models is a destructive operation. Export your archival memories first.
</Warning>
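A minimal sketch of this migration (assumptions: `client.agents.modify` accepts an embedding handle like other agent fields, and listed passages expose `content` and `tags`; paginate the list for large archives):
```python
# 1. List and export all archival memories
passages = client.agents.passages.list(agent_id=agent.id, limit=100)
exported = [{"content": p.content, "tags": p.tags} for p in passages]
# 2. Delete all archival memories
for p in passages:
    client.agents.passages.delete(agent_id=agent.id, passage_id=p.id)
# 3. Update the agent's embedding model
client.agents.modify(agent_id=agent.id, embedding="openai/text-embedding-3-small")
# 4. Re-insert all memories (they are re-embedded on insert)
for m in exported:
    client.agents.passages.insert(agent_id=agent.id, content=m["content"], tags=m["tags"])
```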
## Programmatic access
You can manage archival memory via the SDK:
<CodeGroup>
```typescript TypeScript
// Insert a memory
await client.agents.passages.insert(agent.id, {
content: "The Voight-Kampff test requires a minimum of 20 cross-referenced questions",
tags: ["technical", "testing", "protocol"]
});
// Search memories
const results = await client.agents.passages.search(agent.id, {
query: "testing procedures",
tags: ["protocol"],
page: 0
});
// List all memories
const passages = await client.agents.passages.list(agent.id, {
limit: 100
});
// Get a specific memory
const passage = await client.agents.passages.get(agent.id, passageId);
```
```python Python
# Insert a memory
client.agents.passages.insert(
agent_id=agent.id,
content="The Voight-Kampff test requires a minimum of 20 cross-referenced questions",
tags=["technical", "testing", "protocol"]
)
# Search memories
results = client.agents.passages.search(
agent_id=agent.id,
query="testing procedures",
tags=["protocol"],
page=0
)
# List all memories
passages = client.agents.passages.list(
agent_id=agent.id,
limit=100
)
# Get a specific memory
passage = client.agents.passages.get(
agent_id=agent.id,
passage_id=passage_id
)
```
</CodeGroup>
## Next steps
<CardGroup cols={2}>
<Card
title="Best Practices"
href="/guides/agents/archival-best-practices"
>
Learn patterns, pitfalls, and advanced usage
</Card>
<Card
title="Archival Memory Overview"
href="/guides/agents/archival-memory"
>
Back to archival memory overview
</Card>
</CardGroup>

View File

@@ -26,7 +26,26 @@ Memory management with sleep-time compute can reduce the latency of your main ag
You can enable agents to modify their own blocks with tools. By default, agents of type `memgpt_v2_agent` have the tools `memory_insert` and `memory_replace`, which let them manage the values in their own blocks. The legacy tools `core_memory_replace` and `core_memory_append` are deprecated but still available for backwards compatibility with type `memgpt_agent`. You can also make custom modifications to blocks by implementing your own custom tools that access the agent's state through the special `agent_state` parameter.
Below is an example of a tool that re-writes the entire memory block of an agent with a new string:
```python
<CodeGroup>
```typescript TypeScript
function rethinkMemory(agentState: AgentState, newMemory: string, targetBlockLabel: string): void {
/**
* Rewrite memory block for the main agent, newMemory should contain all current information from the block that is not outdated or inconsistent, integrating any new information, resulting in a new memory block that is organized, readable, and comprehensive.
*
* @param newMemory - The new memory with information integrated from the memory block. If there is no new information, then this should be the same as the content in the source block.
* @param targetBlockLabel - The name of the block to write to.
*
* @returns void - Always returns void as this function does not produce a response.
*/
if (agentState.memory.getBlock(targetBlockLabel) === null) {
agentState.memory.createBlock(targetBlockLabel, newMemory);
}
agentState.memory.updateBlockValue(targetBlockLabel, newMemory);
}
```
```python Python
def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_label: str) -> None:
"""
Rewrite memory block for the main agent, new_memory should contain all current information from the block that is not outdated or inconsistent, integrating any new information, resulting in a new memory block that is organized, readable, and comprehensive.
@@ -45,6 +64,7 @@ def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_labe
agent_state.memory.update_block_value(label=target_block_label, value=new_memory)
return None
```
</CodeGroup>
## Modifying blocks via the API
You can also [modify blocks via the API](/api-reference/agents/blocks/modify) to directly edit agents' context windows and memory. This can be useful when you want to surface the contents of an agent's memory somewhere in your application (for example, a dashboard or memory viewer), or when you want to programmatically modify an agent's memory state (for example, allowing an end-user to directly correct or modify their agent's memory).
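For example, surfacing and correcting a block from application code might look like this (a sketch; `retrieve` is assumed to mirror the documented `modify` endpoint):
```python
# Read the agent's "human" block, e.g. to render it in a dashboard
block = client.agents.blocks.retrieve(agent_id=agent.id, block_label="human")
print(block.value)
# Apply an end-user's correction directly to the agent's memory
client.agents.blocks.modify(
    agent_id=agent.id,
    block_label="human",
    value="Name: Chad. Prefers Python for data science."
)
```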
@@ -55,8 +75,32 @@ You can also [modify blocks via the API](/api-reference/agents/blocks/modify) to
Importing the Letta Python client inside a tool is a powerful way to allow agents to interact with other agents, since you can use any of the API endpoints. For example, you could create a custom tool that allows an agent to create another Letta agent.
</Tip>
You can allow agents to modify the blocks of other agents by creating tools that import the Letta Python SDK, then using the block update endpoint:
```python maxLines=50
You can allow agents to modify the blocks of other agents by creating tools that import the Letta SDK, then using the block update endpoint:
<CodeGroup>
```typescript TypeScript
async function updateSupervisorBlock(blockLabel: string, newValue: string): Promise<void> {
  /**
   * Update the value of a block in the supervisor agent.
   *
   * @param blockLabel - The label of the block to update.
   * @param newValue - The new value for the block.
   *
   * @returns A promise that resolves once the block has been updated.
   */
  const { LettaClient } = require('@letta-ai/letta-client');
  const client = new LettaClient({
    baseUrl: "http://localhost:8283"
  });
  // ID of the supervisor agent (placeholder; hardcode or inject via tool variables)
  const agentId = "SUPERVISOR_AGENT_ID";
  await client.agents.blocks.modify(agentId, blockLabel, {
    value: newValue
  });
}
```
```python Python
def update_supervisor_block(block_label: str, new_value: str) -> None:
"""
Update the value of a block in the supervisor agent.
@@ -80,3 +124,4 @@ def update_supervisor_block(block_label: str, new_value: str) -> None:
value=new_value
)
```
</CodeGroup>

View File

@@ -60,8 +60,64 @@ tool_from_class = client.tools.add(
)
```
To add this tool using the SDK:
<CodeGroup>
```typescript title="typescript"
import { LettaClient } from '@letta-ai/letta-client';
// create a client to connect to your local Letta server
const client = new LettaClient({
baseUrl: "http://localhost:8283"
});
// create the tool
const toolFromClass = await client.tools.add({
tool: manageInventoryTool,
});
```
```python title="python"
from letta_client import Letta
# create a client to connect to your local Letta server
client = Letta(
base_url="http://localhost:8283"
)
# create the tool
tool_from_class = client.tools.add(
tool=ManageInventoryTool(),
)
```
</CodeGroup>
### Specifying tools via function docstrings
You can create a tool by passing in a function with a [Google Style Python docstring](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods) specifying the arguments and description of the tool:
<CodeGroup>
```typescript title="typescript"
// install letta-client with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client';
// create a client to connect to your local Letta server
const client = new LettaClient({
baseUrl: "http://localhost:8283"
});
// define a function
function rollDice(): string {
const diceRollOutcome = Math.floor(Math.random() * 20) + 1;
const outputString = `You rolled a ${diceRollOutcome}`;
return outputString;
}
// create the tool
const tool = await client.tools.createFromFunction({
func: rollDice
});
```
```python title="python" maxLines=50
# install letta_client with `pip install letta-client`
from letta_client import Letta
@@ -93,6 +149,8 @@ tool = client.tools.create_from_function(
func=roll_dice
)
```
</CodeGroup>
The tool creation will return a `Tool` object. You can update the tool with `client.tools.upsert_from_function(...)`.
@@ -170,12 +228,24 @@ def check_order_status(
```
Then, you can define the tool in Letta via the `source_code` parameter:
<CodeGroup>
```typescript title="typescript"
import * as fs from 'fs';
const tool = await client.tools.create({
sourceCode: fs.readFileSync("custom_tool.py", "utf-8")
});
```
```python title="python" maxLines=50
tool = client.tools.create(
source_code = open("custom_tool.py", "r").read()
)
```
Note that in this case, `check_order_status` will become the name of your tool, since it is the last Python function in the file. Make sure it includes a [Google Style Python docstring](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods) to define the tools arguments and description.
</CodeGroup>
Note that in this case, `check_order_status` will become the name of your tool, since it is the last Python function in the file. Make sure it includes a [Google Style Python docstring](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods) to define the tool's arguments and description.
# (Advanced) Accessing Agent State
<Warning>

View File

@@ -63,6 +63,17 @@ To create a folder click the "Filesystem" tab in the bottom-left of the ADE, the
To create a folder, you will need to specify a unique `name` as well as an `EmbeddingConfig`:
<CodeGroup>
```typescript TypeScript
// get an available embedding_config
const embeddingConfigs = await client.embeddingModels.list()
const embeddingConfig = embeddingConfigs[0];
// create the folder
const folder = await client.folders.create({
name: "my_folder",
embeddingConfig: embeddingConfig
});
```
```python title="python"
# get an available embedding_config
embedding_configs = client.embedding_models.list()
@@ -74,17 +85,6 @@ folder = client.folders.create(
embedding_config=embedding_config
)
```
```typescript title="node.js"
// get an available embedding_config
const embeddingConfigs = await client.embeddingModels.list()
const embeddingConfig = embeddingConfigs[0];
// create the folder
const folder = await client.folders.create({
name: "my_folder",
embeddingConfig: embeddingConfig
});
```
</CodeGroup>
Now that you've created the folder, you can start loading data into the folder.
@@ -99,24 +99,7 @@ To upload a file, simply drag and drop the file into the folders tab, or click t
Uploading a file to a folder will create an async job for processing the file, which will split the file into chunks and embed them.
<CodeGroup>
```python title="python"
# upload a file into the folder
job = client.folders.files.upload(
folder_id=folder.id,
file=open("my_file.txt", "rb")
)
# wait until the job is completed
while True:
job = client.jobs.retrieve(job.id)
if job.status == "completed":
break
elif job.status == "failed":
raise ValueError(f"Job failed: {job.metadata}")
print(f"Job status: {job.status}")
time.sleep(1)
```
```typescript title="node.js"
```typescript TypeScript
// upload a file into the folder
const uploadJob = await client.folders.files.upload(
createReadStream("my_file.txt"),
@@ -136,9 +119,35 @@ while (true) {
await new Promise((resolve) => setTimeout(resolve, 1000));
}
```
```python title="python"
# upload a file into the folder
job = client.folders.files.upload(
folder_id=folder.id,
file=open("my_file.txt", "rb")
)
# wait until the job is completed
while True:
job = client.jobs.retrieve(job.id)
if job.status == "completed":
break
elif job.status == "failed":
raise ValueError(f"Job failed: {job.metadata}")
print(f"Job status: {job.status}")
time.sleep(1)
```
</CodeGroup>
Once the job is completed, you can list the files and the generated passages in the folder:
<CodeGroup>
```typescript TypeScript
// list files in the folder
const files = await client.folders.files.list(folder.id);
console.log(`Files in folder: ${files}`);
// list passages in the folder
const passages = await client.folders.passages.list(folder.id);
console.log(`Passages in folder: ${passages}`);
```
```python title="python"
# list files in the folder
files = client.folders.files.list(folder_id=folder.id)
@@ -148,28 +157,19 @@ print(f"Files in folder: {files}")
passages = client.folders.passages.list(folder_id=folder.id)
print(f"Passages in folder: {passages}")
```
```typescript title="node.js"
// list files in the folder
const files = await client.folders.files.list(folder.id);
console.log(`Files in folder: ${files}`);
// list passages in the folder
const passages = await client.folders.passages.list(folder.id);
console.log(`Passages in folder: ${passages}`);
```
</CodeGroup>
## Listing available folders
You can view available folders by listing them:
<CodeGroup>
```typescript TypeScript
// list folders
const folders = await client.folders.list();
```
```python title="python"
# list folders
folders = client.folders.list()
```
```typescript title="node.js"
// list folders
const folders = await client.folders.list();
```
</CodeGroup>
## Connecting a folder to an agent
@@ -188,14 +188,14 @@ You can also attach existing folders by clicking the "attach existing" button in
You can attach a folder to an agent by specifying both the folder and agent IDs:
<CodeGroup>
```typescript TypeScript
await client.agents.folders.attach(agent.id, folder.id);
```
```python title="python"
client.agents.folders.attach(agent_id=agent.id, folder_id=folder.id)
```
```typescript title="node.js"
await client.agents.folders.attach(agent.id, folder.id);
```
</CodeGroup>
Note that your agent and folder must be configured with the same embedding model, to ensure that the agent is able to search accross a common embedding space for archival memory.
Note that your agent and folder must be configured with the same embedding model, to ensure that the agent is able to search across a common embedding space for archival memory.
## Detaching the folder
@@ -207,10 +207,10 @@ To detach a folder from an agent, click the "detach" button in the folders tab.
Detaching a folder will remove the files from the agent's context window:
<CodeGroup>
```typescript TypeScript
await client.agents.folders.detach(agent.id, folder.id);
```
```python title="python"
client.agents.folders.detach(agent_id=agent.id, folder_id=folder.id)
```
```typescript title="node.js"
await client.agents.folders.detach(agent.id, folder.id);
```
</CodeGroup>

View File

@@ -4,9 +4,9 @@ subtitle: Coordinate multiple agents with different communication patterns
slug: guides/agents/groups
---
<Callout type="info" emoji="🚀">
Groups are a new feature in Letta and the specification is actively evolving. If you need support, please chat with us on [Discord](https://discord.gg/letta).
</Callout>
<Warning>
Groups support is experimental and may be unstable. For more information, visit our [Discord](https://discord.gg/letta).
</Warning>
Groups enable sophisticated multi-agent coordination patterns in Letta. Each group type provides a different communication and execution pattern, allowing you to choose the right architecture for your multi-agent system.
@@ -80,54 +80,7 @@ sequenceDiagram
### Code Example
<CodeGroup>
```python title="python" maxLines=50
from letta_client import Letta, SleeptimeManager
client = Letta()
# Create main conversation agent
main_agent = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am the main conversation agent"}
]
)
# Create sleeptime agents for background tasks
monitor_agent = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I monitor conversation sentiment and key topics"}
]
)
summary_agent = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I create periodic summaries of the conversation"}
]
)
# Create a Sleeptime group
group = client.groups.create(
agent_ids=[monitor_agent.id, summary_agent.id],
description="Background agents that process conversation periodically",
manager_config=SleeptimeManager(
manager_agent_id=main_agent.id,
sleeptime_agent_frequency=3 # Execute every 3 turns
)
)
# Send messages to the group
response = client.groups.messages.create(
group_id=group.id,
messages=[
{"role": "user", "content": "Let's discuss our project roadmap"}
]
)
```
```typescript title="node.js" maxLines=50
```typescript TypeScript maxLines=50
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient();
@@ -174,6 +127,53 @@ const response = await client.groups.messages.create(
}
);
```
```python title="python" maxLines=50
from letta_client import Letta, SleeptimeManager
client = Letta()
# Create main conversation agent
main_agent = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am the main conversation agent"}
]
)
# Create sleeptime agents for background tasks
monitor_agent = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I monitor conversation sentiment and key topics"}
]
)
summary_agent = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I create periodic summaries of the conversation"}
]
)
# Create a Sleeptime group
group = client.groups.create(
agent_ids=[monitor_agent.id, summary_agent.id],
description="Background agents that process conversation periodically",
manager_config=SleeptimeManager(
manager_agent_id=main_agent.id,
sleeptime_agent_frequency=3 # Execute every 3 turns
)
)
# Send messages to the group
response = client.groups.messages.create(
group_id=group.id,
messages=[
{"role": "user", "content": "Let's discuss our project roadmap"}
]
)
```
</CodeGroup>
## RoundRobin
@@ -212,52 +212,7 @@ sequenceDiagram
### Code Example
<CodeGroup>
```python title="python" maxLines=50
from letta_client import Letta, RoundRobinManager
client = Letta()
# Create agents for the group
agent1 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am the first agent in the group"}
]
)
agent2 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am the second agent in the group"}
]
)
agent3 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am the third agent in the group"}
]
)
# Create a RoundRobin group
group = client.groups.create(
agent_ids=[agent1.id, agent2.id, agent3.id],
description="A group that cycles through agents in order",
manager_config=RoundRobinManager(
max_turns=3 # Optional: defaults to number of agents
)
)
# Send a message to the group
response = client.groups.messages.create(
group_id=group.id,
messages=[
{"role": "user", "content": "Hello group, what are your thoughts on this topic?"}
]
)
```
```typescript title="node.js" maxLines=50
```typescript TypeScript maxLines=50
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient();
@@ -302,6 +257,51 @@ const response = await client.groups.messages.create(
}
);
```
```python title="python" maxLines=50
from letta_client import Letta, RoundRobinManager
client = Letta()
# Create agents for the group
agent1 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am the first agent in the group"}
]
)
agent2 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am the second agent in the group"}
]
)
agent3 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am the third agent in the group"}
]
)
# Create a RoundRobin group
group = client.groups.create(
agent_ids=[agent1.id, agent2.id, agent3.id],
description="A group that cycles through agents in order",
manager_config=RoundRobinManager(
max_turns=3 # Optional: defaults to number of agents
)
)
# Send a message to the group
response = client.groups.messages.create(
group_id=group.id,
messages=[
{"role": "user", "content": "Hello group, what are your thoughts on this topic?"}
]
)
```
</CodeGroup>
## Supervisor
@@ -337,60 +337,7 @@ graph TB
### Code Example
<CodeGroup>
```python title="python" maxLines=50
from letta_client import Letta, SupervisorManager
client = Letta()
# Create supervisor agent
supervisor = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a supervisor managing multiple workers"}
]
)
# Create worker agents
worker1 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a data analysis specialist"}
]
)
worker2 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a research specialist"}
]
)
worker3 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a writing specialist"}
]
)
# Create a Supervisor group
group = client.groups.create(
agent_ids=[worker1.id, worker2.id, worker3.id],
description="A supervisor-worker group for parallel task execution",
manager_config=SupervisorManager(
manager_agent_id=supervisor.id
)
)
# Send a message to the group
response = client.groups.messages.create(
group_id=group.id,
messages=[
{"role": "user", "content": "Analyze this data and prepare a report"}
]
)
```
```typescript title="node.js" maxLines=50
```typescript TypeScript maxLines=50
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient();
@@ -443,6 +390,59 @@ const response = await client.groups.messages.create(
}
);
```
```python title="python" maxLines=50
from letta_client import Letta, SupervisorManager
client = Letta()
# Create supervisor agent
supervisor = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a supervisor managing multiple workers"}
]
)
# Create worker agents
worker1 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a data analysis specialist"}
]
)
worker2 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a research specialist"}
]
)
worker3 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a writing specialist"}
]
)
# Create a Supervisor group
group = client.groups.create(
agent_ids=[worker1.id, worker2.id, worker3.id],
description="A supervisor-worker group for parallel task execution",
manager_config=SupervisorManager(
manager_agent_id=supervisor.id
)
)
# Send a message to the group
response = client.groups.messages.create(
group_id=group.id,
messages=[
{"role": "user", "content": "Analyze this data and prepare a report"}
]
)
```
</CodeGroup>
## Dynamic
@@ -478,62 +478,7 @@ flowchart LR
### Code Example
<CodeGroup>
```python title="python" maxLines=100
from letta_client import Letta, DynamicManager
client = Letta()
# Create orchestrator agent
orchestrator = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am an orchestrator that decides who speaks next based on context"}
]
)
# Create participant agents
expert1 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a technical expert"}
]
)
expert2 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a business strategist"}
]
)
expert3 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a creative designer"}
]
)
# Create a Dynamic group
group = client.groups.create(
agent_ids=[expert1.id, expert2.id, expert3.id],
description="A dynamic group where the orchestrator chooses speakers",
manager_config=DynamicManager(
manager_agent_id=orchestrator.id,
termination_token="DONE!", # Optional: default is "DONE!"
max_turns=10 # Optional: prevent infinite loops
)
)
# Send a message to the group
response = client.groups.messages.create(
group_id=group.id,
messages=[
{"role": "user", "content": "Let's design a new product. Who should start?"}
]
)
```
```typescript title="node.js" maxLines=100
```typescript TypeScript maxLines=100
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient();
@@ -588,6 +533,61 @@ const response = await client.groups.messages.create(
}
);
```
```python title="python" maxLines=100
from letta_client import Letta, DynamicManager
client = Letta()
# Create orchestrator agent
orchestrator = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am an orchestrator that decides who speaks next based on context"}
]
)
# Create participant agents
expert1 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a technical expert"}
]
)
expert2 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a business strategist"}
]
)
expert3 = client.agents.create(
model="openai/gpt-4.1",
memory_blocks=[
{"label": "persona", "value": "I am a creative designer"}
]
)
# Create a Dynamic group
group = client.groups.create(
agent_ids=[expert1.id, expert2.id, expert3.id],
description="A dynamic group where the orchestrator chooses speakers",
manager_config=DynamicManager(
manager_agent_id=orchestrator.id,
termination_token="DONE!", # Optional: default is "DONE!"
max_turns=10 # Optional: prevent infinite loops
)
)
# Send a message to the group
response = client.groups.messages.create(
group_id=group.id,
messages=[
{"role": "user", "content": "Let's design a new product. Who should start?"}
]
)
```
</CodeGroup>
## Handoff (Coming Soon)

View File

@@ -4,6 +4,10 @@ slug: guides/agents/human-in-the-loop
subtitle: How to integrate human-in-the-loop workflows for tool approval
---
<Warning>
Human-in-the-Loop support is experimental and may be unstable. For more information, visit our [Discord](https://discord.gg/letta).
</Warning>
Human-in-the-loop (HITL) workflows allow you to maintain control over critical agent actions by requiring human approval before executing certain tools. This is essential for operations that could have significant consequences, such as database modifications, financial transactions, or external API calls with cost implications.
```mermaid
@@ -119,7 +123,7 @@ agent = client.agents.create(
# ... other configuration
)
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
// Create a tool that requires approval by default
const approvalTool = await client.tools.upsert({
name: "sensitive_operation",
@@ -181,7 +185,7 @@ agent = client.agents.create(
# ... other configuration
)
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
// Modify a tool to require approval by default
const approvalTool = await client.tools.modify({
  toolId: sensitiveOperation.id,
@@ -228,7 +232,7 @@ tools = client.agents.tools.list(agent_id=agent.id)
for tool in tools:
print(f"{tool.name}: requires_approval={tool.requires_approval}")
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
// Modify approval requirement for a specific agent
await client.agents.tools.modifyApproval({
agentId: agent.id,
@@ -321,7 +325,7 @@ response = client.agents.messages.create(
"stop_reason": "requires_approval"
}
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
const response = await client.agents.messages.create({
agentId: agent.id,
requestBody: {
@@ -430,7 +434,7 @@ response = client.agents.messages.create(
"stop_reason": "end_turn"
}
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
// Approve the tool call
const response = await client.agents.messages.create({
agentId: agent.id,
@@ -541,7 +545,7 @@ response = client.agents.messages.create(
"stop_reason": "requires_approval"
}
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
// Deny with explanation
const response = await client.agents.messages.create({
agentId: agent.id,
@@ -629,7 +633,7 @@ if run_id:
for chunk in client.runs.stream(run_id, starting_after=last_seq):
print(chunk)
```
```typescript node.js maxLines=70
```typescript TypeScript maxLines=70
// Receive an approval_request_message, then approve in background
const approve = await client.agents.messages.createStream({
agentId: agent.id,

View File

@@ -35,47 +35,7 @@ Create a tool that defines your desired response format. The tool arguments beco
### Creating a Structured Generation Tool
<CodeGroup>
```python title="python" maxLines=100
from letta_client import Letta
# Create client (Letta Cloud)
client = Letta(token="LETTA_API_KEY")
# Or for self-hosted
# client = Letta(base_url="http://localhost:8283")
def generate_rank(rank: int, reason: str):
"""Generate a ranking with explanation.
Args:
rank (int): The numerical rank from 1-10.
reason (str): The reasoning behind the rank.
"""
print("Rank generated")
return
# Create the tool
tool = client.tools.create(func=generate_rank)
# Create agent with the structured generation tool
agent_state = client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[
{
"label": "human",
"value": "The human's name is Chad. They are a food enthusiast who enjoys trying different cuisines."
},
{
"label": "persona",
"value": "I am a helpful food critic assistant. I provide detailed rankings and reviews of different foods and restaurants."
}
],
tool_ids=[tool.id]
)
```
```typescript title="node.js" maxLines=100
```typescript TypeScript maxLines=100
import { LettaClient } from '@letta-ai/letta-client'
// Create client (Letta Cloud)
@@ -117,11 +77,78 @@ const agentState = await client.agents.create({
toolIds: [tool.id]
});
```
```python title="python" maxLines=100
from letta_client import Letta
# Create client (Letta Cloud)
client = Letta(token="LETTA_API_KEY")
# Or for self-hosted
# client = Letta(base_url="http://localhost:8283")
def generate_rank(rank: int, reason: str):
"""Generate a ranking with explanation.
Args:
rank (int): The numerical rank from 1-10.
reason (str): The reasoning behind the rank.
"""
print("Rank generated")
return
# Create the tool
tool = client.tools.create(func=generate_rank)
# Create agent with the structured generation tool
agent_state = client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[
{
"label": "human",
"value": "The human's name is Chad. They are a food enthusiast who enjoys trying different cuisines."
},
{
"label": "persona",
"value": "I am a helpful food critic assistant. I provide detailed rankings and reviews of different foods and restaurants."
}
],
tool_ids=[tool.id]
)
```
</CodeGroup>
### Using the Structured Generation Tool
<CodeGroup>
```typescript TypeScript maxLines=100
// Send message and instruct agent to use the tool
const response = await client.agents.messages.create(
agentState.id, {
messages: [
{
role: "user",
content: "How do you rank sushi as a food? Please use the generate_rank tool to provide your response."
}
]
}
);
// Extract structured data from tool call
for (const message of response.messages) {
if (message.messageType === "tool_call_message") {
const args = JSON.parse(message.toolCall.arguments);
console.log(`Rank: ${args.rank}`);
console.log(`Reason: ${args.reason}`);
}
}
// Example output:
// Rank: 8
// Reason: Sushi is a highly regarded cuisine known for its fresh ingredients...
```
```python title="python" maxLines=100
# Send message and instruct agent to use the tool
response = client.agents.messages.create(
@@ -148,33 +175,6 @@ for message in response.messages:
# Rank: 8
# Reason: Sushi is a highly regarded cuisine known for its fresh ingredients...
```
```typescript title="node.js" maxLines=100
// Send message and instruct agent to use the tool
const response = await client.agents.messages.create(
agentState.id, {
messages: [
{
role: "user",
content: "How do you rank sushi as a food? Please use the generate_rank tool to provide your response."
}
]
}
);
// Extract structured data from tool call
for (const message of response.messages) {
if (message.messageType === "tool_call_message") {
const args = JSON.parse(message.toolCall.arguments);
console.log(`Rank: ${args.rank}`);
console.log(`Reason: ${args.reason}`);
}
}
// Example output:
// Rank: 8
// Reason: Sushi is a highly regarded cuisine known for its fresh ingredients...
```
</CodeGroup>
The agent will call the tool, and you can extract the structured arguments:
@@ -201,45 +201,7 @@ Under the hood, `response_format` overrides the schema for the `send_message` to
### Basic JSON Mode
<CodeGroup>
```python title="python" maxLines=100
from letta_client import Letta
# Create client (Letta Cloud)
client = Letta(token="LETTA_API_KEY")
# Create agent with basic JSON mode (OpenAI/compatible providers only)
agent_state = client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[
{
"label": "human",
"value": "The human's name is Chad. They work as a data analyst and prefer clear, organized information."
},
{
"label": "persona",
"value": "I am a helpful assistant who provides clear and well-organized responses."
}
],
response_format={"type": "json_object"}
)
# Send message expecting JSON response
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
{
"role": "user",
"content": "How do you rank sushi as a food? Please respond in JSON format with rank and reason fields."
}
]
)
for message in response.messages:
print(message)
```
```typescript title="node.js" maxLines=100
```typescript TypeScript maxLines=100
import { LettaClient } from '@letta-ai/letta-client'
// Create client (Letta Cloud)
@@ -278,6 +240,44 @@ for (const message of response.messages) {
console.log(message);
}
```
```python title="python" maxLines=100
from letta_client import Letta
# Create client (Letta Cloud)
client = Letta(token="LETTA_API_KEY")
# Create agent with basic JSON mode (OpenAI/compatible providers only)
agent_state = client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[
{
"label": "human",
"value": "The human's name is Chad. They work as a data analyst and prefer clear, organized information."
},
{
"label": "persona",
"value": "I am a helpful assistant who provides clear and well-organized responses."
}
],
response_format={"type": "json_object"}
)
# Send message expecting JSON response
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
{
"role": "user",
"content": "How do you rank sushi as a food? Please respond in JSON format with rank and reason fields."
}
]
)
for message in response.messages:
print(message)
```
</CodeGroup>
### Advanced JSON Schema Mode
@@ -285,73 +285,7 @@ for (const message of response.messages) {
For more precise control, you can use OpenAI's `json_schema` mode with strict validation:
<CodeGroup>
```python title="python" maxLines=100
from letta_client import Letta
client = Letta(token="LETTA_API_KEY")
# Define structured schema (from OpenAI structured outputs guide)
response_format = {
"type": "json_schema",
"json_schema": {
"name": "food_ranking",
"schema": {
"type": "object",
"properties": {
"rank": {
"type": "integer",
"minimum": 1,
"maximum": 10
},
"reason": {
"type": "string"
},
"categories": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": { "type": "string" },
"score": { "type": "integer" }
},
"required": ["name", "score"],
"additionalProperties": False
}
}
},
"required": ["rank", "reason", "categories"],
"additionalProperties": False
},
"strict": True
}
}
# Create agent
agent_state = client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[]
)
# Update agent with response format
agent_state = client.agents.update(
agent_id=agent_state.id,
response_format=response_format
)
# Send message
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
{"role": "user", "content": "How do you rank sushi? Include categories for taste, presentation, and value."}
]
)
for message in response.messages:
print(message)
```
```typescript title="node.js" maxLines=100
```typescript TypeScript maxLines=100
import { LettaClient } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
@@ -418,6 +352,72 @@ for (const message of response.messages) {
console.log(message);
}
```
```python title="python" maxLines=100
from letta_client import Letta
client = Letta(token="LETTA_API_KEY")
# Define structured schema (from OpenAI structured outputs guide)
response_format = {
"type": "json_schema",
"json_schema": {
"name": "food_ranking",
"schema": {
"type": "object",
"properties": {
"rank": {
"type": "integer",
"minimum": 1,
"maximum": 10
},
"reason": {
"type": "string"
},
"categories": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": { "type": "string" },
"score": { "type": "integer" }
},
"required": ["name", "score"],
"additionalProperties": False
}
}
},
"required": ["rank", "reason", "categories"],
"additionalProperties": False
},
"strict": True
}
}
# Create agent
agent_state = client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[]
)
# Update agent with response format
agent_state = client.agents.update(
agent_id=agent_state.id,
response_format=response_format
)
# Send message
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
{"role": "user", "content": "How do you rank sushi? Include categories for taste, presentation, and value."}
]
)
for message in response.messages:
print(message)
```
</CodeGroup>
With structured JSON schema, the agent's response will be strictly validated:
@@ -440,6 +440,18 @@ With structured JSON schema, the agent's response will be strictly validated:
You can update an existing agent's response format:
<CodeGroup>
```typescript TypeScript maxLines=100
// Update agent to use JSON mode (OpenAI/compatible only)
await client.agents.update(agentState.id, {
responseFormat: { type: "json_object" }
});
// Or remove JSON mode
await client.agents.update(agentState.id, {
responseFormat: null
});
```
```python title="python" maxLines=100
# Update agent to use JSON mode (OpenAI/compatible only)
client.agents.update(
@@ -453,16 +465,4 @@ client.agents.update(
response_format=None
)
```
```typescript title="node.js" maxLines=100
// Update agent to use JSON mode (OpenAI/compatible only)
await client.agents.update(agentState.id, {
responseFormat: { type: "json_object" }
});
// Or remove JSON mode
await client.agents.update(agentState.id, {
responseFormat: null
});
```
</CodeGroup>

View File

@@ -79,7 +79,7 @@ for chunk in stream:
for chunk in client.runs.stream(run_id, starting_after=last_seq_id):
print(chunk)
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
const stream = await client.agents.messages.createStream({
agentId: agentState.id,
requestBody: {
@@ -151,6 +151,57 @@ for chunk in approve:
for chunk in client.runs.stream(run_id, starting_after=approve_seq):
print(chunk)
```
```typescript TypeScript maxLines=60
// 1) Start background stream and capture approval request
const stream = await client.agents.messages.createStream(
agent.id, {
messages: [{role: "user", content: "Do a sensitive operation"}],
streamTokens: true,
background: true,
}
);
let approvalRequestId = null;
let origRunId = null;
let lastSeqId = 0;
for await (const chunk of stream) {
if (chunk.runId && chunk.seqId) {
origRunId = chunk.runId;
lastSeqId = chunk.seqId;
}
if (chunk.messageType === "approval_request_message") {
approvalRequestId = chunk.id;
break;
}
}
// 2) Approve in background; capture the approval stream cursor (this creates a new run)
const approveStream = await client.agents.messages.createStream(
agent.id, {
messages: [{type: "approval", approve: true, approvalRequestId}],
streamTokens: true,
background: true,
}
);
let runId = null;
let approveSeq = 0;
for await (const chunk of approveStream) {
if (chunk.runId && chunk.seqId) {
runId = chunk.runId;
approveSeq = chunk.seqId;
}
if (chunk.messageType === "tool_return_message") {
// Tool result arrives here on the approval stream
break;
}
}
// 3) Resume that run to read follow-up tokens
for await (const chunk of client.runs.stream(runId, {startingAfter: approveSeq})) {
console.log(chunk);
}
```
</CodeGroup>
### HITL in Background Mode
@@ -240,7 +291,7 @@ for chunk in approve:
for chunk in client.runs.stream(run_id, starting_after=approve_seq):
print(chunk)
```
```typescript node.js maxLines=70
```typescript TypeScript maxLines=70
// 1) Start background stream and capture approval request
const stream = await client.agents.messages.createStream({
agentId: agent.id,
@@ -343,7 +394,7 @@ if active_runs:
for chunk in stream:
print(chunk)
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
// Find and resume active background streams
const activeRuns = await client.runs.active({
agentIds: ["agent-123", "agent-456"],
@@ -416,7 +467,7 @@ while run.status != "completed":
# Get the messages once complete
messages = client.runs.messages.list(run_id=run.id)
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
// Start async operation (returns immediately with run ID)
const run = await client.agents.createAgentMessageAsync({
agentId: agentState.id,
@@ -501,7 +552,7 @@ for chunk in stream:
continue
print(chunk)
```
```typescript node.js maxLines=50
```typescript TypeScript maxLines=50
// Configure client with extended timeout
import { Letta } from '@letta/sdk';

View File

@@ -25,6 +25,25 @@ Set `enable_sleeptime` to `true` to enable the sleep-time agent which will manag
Additionally, set `initial_message_sequence` to an empty array to start the conversation with a completely empty initial message buffer.
<CodeGroup>
```typescript TypeScript
import { LettaClient } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
// create the Letta agent
const agent = await client.agents.create({
agentType: "voice_convo_agent",
memoryBlocks: [
{ value: "Name: ?", label: "human" },
{ value: "You are a helpful assistant.", label: "persona" },
],
model: "openai/gpt-4o-mini", // Use 4o-mini for speed
embedding: "openai/text-embedding-3-small",
enableSleeptime: true,
initialMessageSequence: [],
});
```
```python title="python"
from letta_client import Letta
@@ -44,25 +63,6 @@ agent = client.agents.create(
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
// create the Letta agent
const agent = await client.agents.create({
agentType: "voice_convo_agent",
memoryBlocks: [
{ value: "Name: ?", label: "human" },
{ value: "You are a helpful assistant.", label: "persona" },
],
model: "openai/gpt-4o-mini", // Use 4o-mini for speed
embedding: "openai/text-embedding-3-small",
enableSleeptime: true,
initialMessageSequence: [],
});
```
```bash title="curl"
curl -X POST https://api.letta.com/v1/agents \
-H "Authorization: Bearer $LETTA_API_KEY" \

View File

@@ -84,29 +84,6 @@ The agent type `memgpt_v2_agent` implements the latest iteration of the MemGPT a
## Creating MemGPT Agents
<CodeGroup>
```python title="Python"
from letta_client import Letta
client = Letta(token="LETTA_API_KEY")
agent_state = client.agents.create(
agent_type="memgpt_v2_agent", # or "memgpt_agent" for v1
model="openai/gpt-4.1",
embedding="openai/text-embedding-3-small",
memory_blocks=[
{
"label": "human",
"value": "The human's name is Chad. They like vibe coding."
},
{
"label": "persona",
"value": "My name is Sam, the all-knowing sentient AI."
}
],
tools=["web_search", "run_code"]
)
```
```typescript title="TypeScript"
import { LettaClient } from '@letta-ai/letta-client'
@@ -130,6 +107,29 @@ const agentState = await client.agents.create({
});
```
```python title="Python"
from letta_client import Letta
client = Letta(token="LETTA_API_KEY")
agent_state = client.agents.create(
agent_type="memgpt_v2_agent", # or "memgpt_agent" for v1
model="openai/gpt-4.1",
embedding="openai/text-embedding-3-small",
memory_blocks=[
{
"label": "human",
"value": "The human's name is Chad. They like vibe coding."
},
{
"label": "persona",
"value": "My name is Sam, the all-knowing sentient AI."
}
],
tools=["web_search", "run_code"]
)
```
```bash title="cURL"
curl -X POST https://api.letta.com/v1/agents \
-H "Authorization: Bearer $LETTA_API_KEY" \

View File

@@ -0,0 +1,295 @@
---
title: Agent Memory
subtitle: How Letta agents manage and evolve their memory
slug: guides/agents/memory
---
<Tip>
Want to dive deeper? Read our blog posts on [agent memory](https://www.letta.com/blog/agent-memory), [context engineering](https://www.letta.com/blog/guide-to-context-engineering), [memory blocks](https://www.letta.com/blog/memory-blocks), and [RAG vs agent memory](https://www.letta.com/blog/rag-vs-agent-memory).
</Tip>
## What is agent memory?
**Agent memory in Letta is about managing what information is visible in the agent's context window.**
Unlike traditional LLMs that are stateless (forgetting everything between interactions), Letta agents maintain persistent, evolving memory by intelligently managing their context window over time.
The key insight: **the context window is a scarce resource.** You can't fit an entire conversation history or knowledge base into it. Effective memory is about:
- **What's in context right now** (immediately visible to the LLM)
- **What's been moved to external storage** (retrievable when needed)
- **Who decides what stays and what goes** (the agent itself)
## The LLM Operating System
Letta is built on the [MemGPT](https://arxiv.org/abs/2310.08560) paper, which introduced the concept of an "LLM Operating System" for memory management. Just like a computer OS manages different types of memory (registers, RAM, disk), Letta agents manage different tiers of information:
```mermaid
flowchart TB
subgraph ContextWindow["⚡ CONTEXT WINDOW (What the LLM sees)"]
direction TB
System[System Prompt<br/>Kernel context]
Blocks[Memory Blocks<br/>Agent-managed context]
Messages[Recent Messages<br/>Conversation buffer]
end
subgraph External["💾 EXTERNAL STORAGE (Retrieved on-demand)"]
direction TB
Recall[Recall Memory<br/>Full conversation history]
Archival[Archival Memory<br/>Explicit facts & knowledge]
Files[Data Sources<br/>Documents & files]
end
Blocks -->|Agent edits| Blocks
Messages -->|Overflow| Recall
ContextWindow -.->|Agent searches| External
```
### Memory tiers explained
| Tier | Size | Speed | Managed By | Purpose |
|------|------|-------|------------|---------|
| **System Prompt** | ~1-2K tokens | Instant | System | Agent instructions & behavior |
| **Memory Blocks** | ~2-4K tokens total | Instant | **Agent** | Self-editing structured memory |
| **Message Buffer** | Variable | Instant | System | Recent conversation flow |
| **Recall Memory** | Unlimited | 1-2 sec | Agent via search | Past conversation history |
| **Archival Memory** | Unlimited | 1-2 sec | Agent via search | Explicit facts & knowledge |
| **Data Sources** | Unlimited | 1-2 sec | Agent via search | Uploaded documents |
## Memory blocks: Units of abstraction
**Memory blocks are discrete, structured sections of the context window that agents can read and edit.**
Think of memory blocks as "variables" that persist across interactions:
```python
# Traditional approach: everything is ephemeral
messages = [
{"role": "user", "content": "I'm Sarah, I like Python"},
{"role": "assistant", "content": "Hi Sarah!"},
{"role": "user", "content": "What's my name?"}, # Model only "knows" from message history
]
# Letta approach: structured, persistent memory blocks
memory_blocks = [
{
"label": "human",
"value": "Name: Sarah\nPreferences: Python programming",
"description": "Key details about the user"
},
{
"label": "persona",
"value": "I am a helpful coding assistant",
"description": "My identity and behavior"
}
]
# Agent can edit these blocks over time as it learns more
```
### Why memory blocks?
**Memory blocks solve the fundamental challenge of context window management:**
1. **Consistency**: Same information is visible across all interactions (not dependent on what fits in message buffer)
2. **Editability**: Agents can update their understanding over time (not just accumulate)
3. **Structure**: Organized sections instead of unstructured message history
4. **Control**: Agents decide what's important enough to persist
### Default memory blocks
Letta agents typically start with two memory blocks:
**Persona Block** - Who the agent is
```
My name is Sam. I am a friendly, professional assistant who helps users
with programming questions. I prefer concise explanations with code examples.
```
**Human Block** - Who the user is
```
The user's name is Sarah. She is a Python developer working on AI applications.
She prefers detailed technical explanations and appreciates best practices.
```
You can add custom blocks for any purpose:
- **Project context**: Current task, goals, progress
- **Organization info**: Company policies, shared knowledge
- **Conversation state**: Multi-step workflow tracking
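For example, a custom block is declared the same way as the defaults - a minimal sketch using the same `memory_blocks` parameter shown above (the labels and values here are illustrative):
```python
from letta_client import Letta

client = Letta(token="LETTA_API_KEY")

# Create an agent with a custom project block alongside the default two
agent = client.agents.create(
    model="openai/gpt-4o-mini",
    embedding="openai/text-embedding-3-small",
    memory_blocks=[
        {"label": "human", "value": "The user's name is Sarah."},
        {"label": "persona", "value": "I am a helpful coding assistant."},
        {
            "label": "project_context",
            "description": "Current task, goals, and progress. Update as work advances.",
            "value": "Task: build a support chatbot. Status: planning.",
        },
    ],
)
```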
## Agentic context engineering
**The key innovation in Letta: agents manage their own memory using tools.**
Instead of a fixed context window or simple retrieval, agents actively decide:
- What to remember (write to memory blocks)
- What to forget (remove outdated information)
- What to search for (query external storage)
- How to organize knowledge (restructure memory blocks)
### Memory management tools
Agents have access to these built-in tools:
- `memory_insert` - Add new information to a memory block
- `memory_replace` - Update or rewrite part of a memory block
- `conversation_search` - Search past messages (recall memory)
- `archival_memory_insert` - Store facts in long-term storage
- `archival_memory_search` - Retrieve facts from long-term storage
Example of an agent using memory tools:
```
User: "I'm working on a Next.js app now, not Django anymore"
Agent thinks: "User has shifted tech stacks. I should update my memory."
Agent calls: memory_replace(
block_label="human",
old_text="She is a Python developer working on Django apps",
new_text="She is a full-stack developer currently working on Next.js apps"
)
Agent responds: "Got it! I've updated my notes that you're now working with Next.js."
```
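Developers can make the same kind of edit directly from application code - a sketch using the `client.agents.blocks.modify` call that appears later in this guide (`agent.id` is assumed to reference an existing agent):
```python
# Rewrite part of the agent's "human" block via the API
client.agents.blocks.modify(
    agent_id=agent.id,
    block_label="human",
    value="She is a full-stack developer currently working on Next.js apps",
)
```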
## RAG vs Agent Memory
**Traditional RAG (Retrieval-Augmented Generation):**
- Retrieves semantically similar chunks
- One-shot retrieval per interaction
- Purely reactive (only searches when prompted)
- No persistent understanding
**Letta Agent Memory:**
- Maintains structured, editable memory in context
- Multi-step retrieval (can paginate, refine searches)
- Proactive management (updates memory as it learns)
- Persistent understanding that improves over time
### When to use what
Use **memory blocks** for:
- Information that should be consistently visible
- Knowledge that evolves (user preferences, project state)
- Structured context (persona, relationships, goals)
Use **external memory (RAG-style)** for:
- Large corpora of documents
- Historical conversation logs
- Facts that rarely change
- Information that's too large for context
**Best practice**: Combine both. Memory blocks hold the "executive summary" while external storage holds the full details.
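A minimal sketch of that split (block contents are illustrative; the archival tools are the built-in ones listed above):
```python
# The "executive summary" lives in an always-visible memory block
agent = client.agents.create(
    model="openai/gpt-4o-mini",
    embedding="openai/text-embedding-3-small",
    memory_blocks=[
        {
            "label": "project_summary",
            "description": "Compact summary of the project; full details live in archival memory.",
            "value": "Support chatbot MVP complete; full design notes are archived.",
        },
    ],
)
# The agent pushes and pulls the full details on demand with its
# archival_memory_insert / archival_memory_search tools during conversation.
```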
## Sleep-time agents
<Info>
Sleep-time agents are an advanced feature for memory management. See [sleep-time agents guide](/guides/agents/sleep-time-agents) for details.
</Info>
Letta supports **sleep-time compute**: background agents that process and optimize memory while the main agent is idle. This enables:
- **Lower latency**: Main agent doesn't spend time on memory management
- **Better memory**: Dedicated agent can do deeper analysis and reorganization
- **Consistent memory**: Sleep-time agent maintains memory quality over time
Think of it like how humans process memories during sleep - consolidating experiences and strengthening important connections.
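Enabling it is a single flag at agent creation - a minimal sketch mirroring the `enable_sleeptime` option used in the voice agent example elsewhere in these docs:
```python
# Create an agent whose memory is maintained by a background sleep-time agent
agent = client.agents.create(
    model="openai/gpt-4o-mini",
    embedding="openai/text-embedding-3-small",
    memory_blocks=[
        {"label": "human", "value": "Name: ?"},
        {"label": "persona", "value": "You are a helpful assistant."},
    ],
    enable_sleeptime=True,
)
```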
## Memory best practices
### 1. Start with clear, specific memory blocks
```python
# ❌ Vague
{"label": "info", "value": "stuff about the user"}
# ✅ Specific
{"label": "user_preferences", "value": "Prefers: Python, VS Code, detailed explanations\nDislikes: Java, Eclipse"}
```
### 2. Write good descriptions
The `description` field tells the agent **when and how** to use the block:
```python
# ❌ Vague description
{
"label": "project",
"description": "Project info",
"value": "Building a chatbot"
}
# ✅ Clear description
{
"label": "project_context",
"description": "Current project goals, status, and blockers. Update as progress is made.",
"value": "Building a customer support chatbot. Status: MVP complete. Next: Add knowledge base integration."
}
```
### 3. Use read-only blocks for shared knowledge
```python
# Shared organizational knowledge that shouldn't change
{
"label": "company_policies",
"description": "Company policies and guidelines for reference",
"value": "Support hours: 9am-5pm PT. Escalation path: ...",
"read_only": True # Agent can read but not edit
}
```
### 4. Monitor memory block usage
- Check if blocks are hitting size limits
- Review if agents are actually using the blocks effectively
- Adjust descriptions if agents misuse blocks
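One way to spot-check this programmatically - a sketch that assumes your SDK version exposes a block-listing call such as `client.agents.blocks.list`:
```python
# Compare each block's current size against its configured limit
for block in client.agents.blocks.list(agent_id=agent.id):  # assumed API
    used = len(block.value)
    print(f"{block.label}: {used}/{block.limit} characters")
    if block.limit and used > 0.9 * block.limit:
        print(f"  -> '{block.label}' is close to its size limit")
```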
## Memory in multi-agent systems
Memory blocks enable powerful multi-agent patterns:
### Shared memory
Multiple agents can share the same memory block:
```python
# Create shared organizational knowledge
org_block = client.blocks.create(
label="organization",
value="Mission: Help users build AI agents...",
description="Shared organizational context"
)
# Both agents see the same block
agent1 = client.agents.create(block_ids=[org_block.id], ...)
agent2 = client.agents.create(block_ids=[org_block.id], ...)
```
### Cross-agent memory updates
Agents can update each other's memory:
```python
# Supervisor agent updates worker agent's context
supervisor_tool = """
def update_worker_context(new_task_description: str):
client.agents.blocks.modify(
agent_id=worker_agent_id,
block_label="current_task",
value=new_task_description
)
"""
```
## Next steps
- [Memory Blocks API](/guides/agents/memory-blocks) - Creating and managing memory blocks
- [Context Engineering](/guides/agents/context-engineering) - Advanced memory management patterns
- [Multi-Agent Shared Memory](/guides/agents/multiagent-memory) - Coordinating memory across agents
- [Sleep-Time Agents](/guides/agents/sleep-time-agents) - Background memory processing
## Further reading
- [Blog: Agent Memory](https://www.letta.com/blog/agent-memory)
- [Blog: Guide to Context Engineering](https://www.letta.com/blog/guide-to-context-engineering)
- [Blog: Memory Blocks](https://www.letta.com/blog/memory-blocks)
- [Blog: RAG vs Agent Memory](https://www.letta.com/blog/rag-vs-agent-memory)
- [MemGPT Research Paper](https://arxiv.org/abs/2310.08560)

View File

@@ -4,48 +4,111 @@ subtitle: What is agent memory, and how does it work?
slug: guides/agents/memory
---
Agent memory is what enables AI agents to maintain persistent state, learn from interactions, and develop long-term relationships with users. Unlike traditional chatbots that treat each conversation as isolated, agents with sophisticated memory systems can build understanding over time.
## What is agent memory?
## The MemGPT Approach to Memory
**Agent memory in Letta is about managing what information is in the agent's context window.**
Letta is built by the creators of [MemGPT](https://arxiv.org/abs/2310.08560), a research paper that introduced the concept of an "LLM Operating System" for memory management. The base agent design in Letta is a MemGPT-style agent, which means it inherits the core principles of:
The context window is a scarce resource - you can't fit everything into it. Effective memory management is about deciding what stays in context (immediately visible) and what moves to external storage (retrieved when needed).
- **Self-editing memory**: Agents can modify their own memory using tools
- **Memory hierarchy**: Different types of memory for different purposes
- **Context window management**: Intelligent loading and unloading of information
Agent memory enables AI agents to maintain persistent state, learn from interactions, and develop long-term relationships with users. Unlike traditional chatbots that treat each conversation as isolated, agents with sophisticated memory systems can build understanding over time.
## Types of Memory in Letta
Letta agents have access to multiple memory systems:
### Core Memory (In-Context)
Fast, always-accessible memory that stays in the agent's context window. This includes:
- **Persona**: The agent's personality and role
- **Human**: Information about the user
- **Custom memory blocks**: Additional structured information
Memory blocks are structured sections of the agent's context window that persist across all interactions. They are always visible - no retrieval needed.
**Memory blocks are Letta's core abstraction.** You can create blocks with any descriptive label - the agent learns how to use them autonomously. This enables everything from simple user preferences to sophisticated multi-agent coordination.
[Learn more about memory blocks →](/guides/agents/memory-blocks)
### External Memory (Out-of-Context)
Long-term storage for large amounts of information:
- Conversation history beyond context limits (e.g. "recall memory")
- Vector databases for semantic search (e.g. "archival memory")
- Uploaded documents and files
External memory provides unlimited storage for information that doesn't need to be always visible. Agents retrieve from external memory on-demand using search tools.
## Why Agent Memory Matters
Letta provides several built-in external memory systems:
- **Conversation search** - Search past messages using full-text and semantic search
- **Archival memory** - Agent-managed semantically searchable database for facts and knowledge
- **Letta Filesystem** - File management system for documents and data ([learn more](/guides/agents/filesystem))
Effective memory management enables:
Agents can also access any external data source through [MCP servers](/guides/mcp/overview) or [custom tools](/guides/agents/custom-tools) - databases, APIs, vector stores, or third-party services.
- **Personalization**: Agents remember user preferences and history
- **Learning**: Agents improve performance through accumulated experience
- **Context preservation**: Important information persists across conversations
- **Scalability**: Handle unlimited conversation length and data volume
## How Agents Manage Their Memory
## Memory Management in Practice
**What makes Letta unique is that agents don't just read from memory - they actively manage it.** Unlike traditional RAG systems that passively retrieve information, Letta agents use built-in tools to decide what to remember, update, and search for.
Letta provides multiple ways to work with agent memory:
When a user mentions they've switched from Python to TypeScript, the agent may choose to update its memory:
- **Automatic management**: Agents intelligently decide what to remember
- **Manual control**: Developers can directly view and modify memory blocks
- **Shared memory**: Multiple agents can access common memory blocks
- **External data sources**: Connect agents to files, databases, and APIs
<CodeGroup>
```typescript TypeScript
memory_replace(
block_label: "human",
old_text: "Prefers Python for development",
new_text: "Currently using TypeScript for main project"
)
```
```python Python
memory_replace(
block_label="human",
old_text="Prefers Python for development",
new_text="Currently using TypeScript for main project"
)
```
</CodeGroup>
Memory blocks are the fundamental units of Letta's memory system - they can be modified by the agent itself, other agents, or developers through the API.
Agents have three primary tools for editing memory blocks:
- `memory_replace` - Search and replace for precise edits
- `memory_insert` - Insert a line into a block
- `memory_rethink` - Rewrite an entire block
These tools can be attached or detached based on your use case. Not all agents need all tools (for example, some agents may not need `memory_rethink`), and memory tools can be removed entirely from an agent if needed.
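As a sketch, assuming the standard tool attach/detach endpoints (`client.agents.tools.detach` here; check your SDK version):
```python
# Find the memory_rethink tool on an agent and detach it
tools = client.agents.tools.list(agent_id=agent.id)
rethink = next(t for t in tools if t.name == "memory_rethink")
client.agents.tools.detach(agent_id=agent.id, tool_id=rethink.id)  # assumed signature
```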
The agent decides what information is important enough to persist in its memory blocks, actively maintaining this information over time. This enables agents to build understanding through conversation rather than just retrieving relevant documents.
## Memory Blocks vs RAG
Traditional RAG retrieves semantically similar chunks on-demand. Letta's memory blocks are **persistent, structured context** that agents actively maintain.
**Use memory blocks for:**
- Information that should always be visible (user preferences, agent persona)
- Knowledge that evolves over time (project status, learned preferences)
**Use external memory (RAG-style) for:**
- Large document collections
- Historical conversation logs
- Static reference material
**Best practice:** Use both together. Memory blocks hold the "executive summary" while external storage holds the full details.
## Research Background
Letta is built by the creators of [MemGPT](https://arxiv.org/abs/2310.08560), a research paper that introduced the concept of an "LLM Operating System" for memory management. The base agent design in Letta is a MemGPT-style agent, which inherits core principles of self-editing memory, memory hierarchy, and intelligent context window management.
## Next steps
<CardGroup cols={2}>
<Card
title="Memory Blocks Guide"
href="/guides/agents/memory-blocks"
>
Learn how to implement and configure memory blocks in your agents
</Card>
<Card
title="Context Engineering"
href="/guides/agents/context-engineering"
>
Optimize memory performance and advanced memory management
</Card>
<Card
title="Shared Memory Patterns"
href="/guides/agents/multiagent-memory"
>
Use shared memory across multiple agents
</Card>
<Card
title="MemGPT Paper"
href="https://arxiv.org/abs/2310.08560"
>
Read the research behind Letta's memory system
</Card>
</CardGroup>

View File

@@ -8,6 +8,30 @@ slug: guides/agents/memory-blocks
Interested in learning more about the origin of memory blocks? Read our [blog post](https://www.letta.com/blog/memory-blocks).
</Info>
## What are memory blocks?
Memory blocks are structured sections of the agent's context window that persist across all interactions. They are always visible - no retrieval needed.
**Memory blocks are Letta's core abstraction.** Create a block with a descriptive label and the agent learns how to use it. This simple mechanism enables capabilities impossible with traditional context management.
**Key properties:**
- **Agent-managed** - Agents autonomously organize information based on block labels
- **Flexible** - Use for any purpose: knowledge, guidelines, state tracking, scratchpad space
- **Shareable** - Multiple agents can access the same block; update once, visible everywhere
- **Always visible** - Blocks stay in context, never need retrieval
**Examples:**
- Store tool usage guidelines so agents avoid past mistakes
- Maintain working memory in a scratchpad block
- Mirror external state (user's current document) for real-time awareness
- Share read-only policies across all agents from a central source
- Coordinate multi-agent systems: parent agents watch subagent result blocks update in real-time
- Enable emergent behavior: add `performance_tracking` or `emotional_state` and watch agents start using them
Memory blocks aren't just storage - they're a coordination primitive that enables sophisticated agent behavior.
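For example, a standalone scratchpad block can be created once and attached to any agent that needs it - a minimal sketch using the `client.blocks.create` call demonstrated later on this page:
```python
# A reusable scratchpad block for intermediate working notes
scratchpad = client.blocks.create(
    label="scratchpad",
    description="Working memory for intermediate notes; the agent may overwrite freely.",
    value="",
    limit=4000,
)
```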
## Memory block structure
Memory blocks represent a section of an agent's context window. An agent may have multiple memory blocks, or none at all. A memory block consists of:
* A `label`, which is a unique identifier for the block
* A `description`, which describes the purpose of the block
@@ -36,6 +60,33 @@ Read-only blocks are useful when you want to give an agent access to information
## Creating an agent with memory blocks
When you create an agent, you can specify memory blocks to also be created with the agent. For most chat applications, we recommend creating a `human` block (to represent memories about the user) and a `persona` block (to represent the agent's persona).
<CodeGroup>
```typescript TypeScript maxLines=50
// install letta-client with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
// create a client to connect to your local Letta server
const client = new LettaClient({
baseUrl: "http://localhost:8283"
});
// create an agent with two basic self-editing memory blocks
const agentState = await client.agents.create({
memoryBlocks: [
{
label: "human",
value: "The human's name is Bob the Builder.",
limit: 5000
},
{
label: "persona",
value: "My name is Sam, the all-knowing sentient AI.",
limit: 5000
}
],
model: "openai/gpt-4o-mini",
embedding: "openai/text-embedding-3-small"
});
```
```python title="python" maxLines=50
# install letta_client with `pip install letta-client`
from letta_client import Letta
@@ -63,33 +114,6 @@ agent_state = client.agents.create(
embedding="openai/text-embedding-3-small"
)
```
```typescript maxLines=50 title="node.js"
// install letta-client with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
// create a client to connect to your local Letta server
const client = new LettaClient({
baseUrl: "http://localhost:8283"
});
// create an agent with two basic self-editing memory blocks
const agentState = await client.agents.create({
memoryBlocks: [
{
label: "human",
value: "The human's name is Bob the Builder.",
limit: 5000
},
{
label: "persona",
value: "My name is Sam, the all-knowing sentient AI.",
limit: 5000
}
],
model: "openai/gpt-4o-mini",
embedding: "openai/text-embedding-3-small"
});
```
</CodeGroup>
When the agent is created, the corresponding blocks are also created and attached to the agent, so that the block value will be in the context window.
@@ -98,44 +122,7 @@ You can also directly create blocks and attach them to an agent. This can be use
Below is an example of creating a block directly, and attaching the block to two agents by specifying the `block_ids` field.
<CodeGroup>
```python title="python" maxLines=50
# create a persisted block, which can be attached to agents
block = client.blocks.create(
label="organization",
description="A block to store information about the organization",
value="Organization: Letta",
limit=4000,
)
# create an agent with both a shared block and its own blocks
shared_block_agent1 = client.agents.create(
name="shared_block_agent1",
memory_blocks=[
{
"label": "persona",
"value": "I am agent 1"
},
],
block_ids=[block.id],
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small"
)
# create another agent sharing the block
shared_block_agent2 = client.agents.create(
name="shared_block_agent2",
memory_blocks=[
{
"label": "persona",
"value": "I am agent 2"
},
],
block_ids=[block.id],
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small"
)
```
```typescript maxLines=50 title="node.js"
```typescript TypeScript maxLines=50
// create a persisted block, which can be attached to agents
const block = await client.blocks.create({
label: "organization",
@@ -173,9 +160,51 @@ const sharedBlockAgent2 = await client.agents.create({
embedding: "openai/text-embedding-3-small"
});
```
```python title="python" maxLines=50
# create a persisted block, which can be attached to agents
block = client.blocks.create(
label="organization",
description="A block to store information about the organization",
value="Organization: Letta",
limit=4000,
)
# create an agent with both a shared block and its own blocks
shared_block_agent1 = client.agents.create(
name="shared_block_agent1",
memory_blocks=[
{
"label": "persona",
"value": "I am agent 1"
},
],
block_ids=[block.id],
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small"
)
# create another agent sharing the block
shared_block_agent2 = client.agents.create(
name="shared_block_agent2",
memory_blocks=[
{
"label": "persona",
"value": "I am agent 2"
},
],
block_ids=[block.id],
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small"
)
```
</CodeGroup>
You can also attach blocks to existing agents:
```python
<CodeGroup>
```typescript TypeScript
await client.agents.blocks.attach(agent.id, block.id);
```
```python Python
client.agents.blocks.attach(agent_id=agent.id, block_id=block.id)
```
</CodeGroup>
You can see all agents attached to a block by using the `block_id` field in the [blocks retrieve](/api-reference/blocks/retrieve) endpoint.

View File

@@ -0,0 +1,515 @@
---
title: Message Types
subtitle: Understanding message types and working with agent message history
slug: guides/agents/message-types
---
When you interact with a Letta agent and retrieve its message history using `client.agents.messages.list()`, you'll receive various types of messages that represent different aspects of the agent's execution. This guide explains all message types and how to work with them.
## Overview
Letta uses a structured message system where each message has a specific `message_type` field that indicates its purpose. Messages are returned as instances of `LettaMessageUnion`, which is a discriminated union of all possible message types.
## Message Type Categories
### User and System Messages
#### `user_message`
Messages sent by the user or system events packaged as user input.
**Structure:**
```typescript
{
id: string;
date: datetime;
message_type: "user_message";
content: string | Array<TextContent | ImageContent>;
name?: string;
otid?: string;
sender_id?: string;
}
```
**Special User Message Subtypes:**
User messages can contain JSON with a `type` field indicating special message subtypes:
- **`heartbeat`** - Automated timer events that allow agents to chain multiple tool calls. See [Heartbeats](/guides/agents/heartbeats) for more details.
```json
{
"type": "heartbeat",
"reason": "Automated timer",
"time": "2025-10-03 12:34:56 PM PDT-0700"
}
```
- **`login`** - User login events
```json
{
"type": "login",
"last_login": "Never (first login)",
"time": "2025-10-03 12:34:56 PM PDT-0700"
}
```
- **`user_message`** - Standard user messages
```json
{
"type": "user_message",
"message": "Hello, agent!",
"time": "2025-10-03 12:34:56 PM PDT-0700"
}
```
- **`system_alert`** - System notifications and alerts
```json
{
"type": "system_alert",
"message": "System notification text",
"time": "2025-10-03 12:34:56 PM PDT-0700"
}
```
#### `system_message`
Messages generated by the system, typically used for internal context.
**Structure:**
```typescript
{
id: string;
date: datetime;
message_type: "system_message";
content: string;
name?: string;
}
```
**Note:** System messages are never streamed back in responses; they're only visible when paginating through message history.
### Agent Reasoning and Responses
#### `reasoning_message`
Represents the agent's internal reasoning or "chain of thought."
**Structure:**
```typescript
{
id: string;
date: datetime;
message_type: "reasoning_message";
reasoning: string;
source: "reasoner_model" | "non_reasoner_model";
signature?: string;
}
```
**Fields:**
- `reasoning` - The agent's internal thought process
- `source` - Whether this was generated by a model with native reasoning (like o1) or via prompting
- `signature` - Optional cryptographic signature for reasoning verification (for models that support it)
#### `hidden_reasoning_message`
Represents reasoning that has been hidden from the response.
**Structure:**
```typescript
{
id: string;
date: datetime;
message_type: "hidden_reasoning_message";
state: "redacted" | "omitted";
hidden_reasoning?: string;
}
```
**Fields:**
- `state: "redacted"` - The provider redacted the reasoning content
- `state: "omitted"` - The API chose not to include reasoning (e.g., for o1/o3 models)
#### `assistant_message`
The actual message content sent by the agent (typically via the `send_message` tool).
**Structure:**
```typescript
{
id: string;
date: datetime;
message_type: "assistant_message";
content: string | Array<TextContent>;
name?: string;
}
```
### Tool Execution Messages
#### `tool_call_message`
A request from the agent to execute a tool.
**Structure:**
```typescript
{
id: string;
date: datetime;
message_type: "tool_call_message";
tool_call: {
name: string;
arguments: string; // JSON string
tool_call_id: string;
};
}
```
**Example:**
```typescript
{
message_type: "tool_call_message",
tool_call: {
name: "archival_memory_search",
arguments: '{"query": "user preferences", "page": 0}',
tool_call_id: "call_abc123"
}
}
```
#### `tool_return_message`
The result of a tool execution.
**Structure:**
```typescript
{
id: string;
date: datetime;
message_type: "tool_return_message";
tool_return: string;
status: "success" | "error";
tool_call_id: string;
stdout?: string[];
stderr?: string[];
}
```
**Fields:**
- `tool_return` - The formatted return value from the tool
- `status` - Whether the tool executed successfully
- `stdout`/`stderr` - Captured output from the tool execution (useful for debugging)
### Human-in-the-Loop Messages
#### `approval_request_message`
A request for human approval before executing a tool.
**Structure:**
```typescript
{
id: string;
date: datetime;
message_type: "approval_request_message";
tool_call: {
name: string;
arguments: string;
tool_call_id: string;
};
}
```
See [Human-in-the-Loop](/guides/agents/human_in_the_loop) for more information on this experimental feature.
#### `approval_response_message`
The user's response to an approval request.
**Structure:**
```typescript
{
id: string;
date: datetime;
message_type: "approval_response_message";
approve: boolean;
approval_request_id: string;
reason?: string;
}
```
## Working with Messages
### Listing Messages
<CodeGroup>
```typescript TypeScript
import { LettaClient } from "@letta-ai/letta-client";
const client = new LettaClient({
baseUrl: "https://api.letta.com",
});
// List recent messages
const messages = await client.agents.messages.list("agent-id", {
limit: 50,
useAssistantMessage: true,
});
// Iterate through message types
for (const message of messages) {
switch (message.messageType) {
case "user_message":
console.log("User:", message.content);
break;
case "assistant_message":
console.log("Agent:", message.content);
break;
case "reasoning_message":
console.log("Reasoning:", message.reasoning);
break;
case "tool_call_message":
console.log("Tool call:", message.toolCall.name);
break;
// ... handle other types
}
}
```
```python Python
from letta_client import Letta
client = Letta(base_url="https://api.letta.com")
# List recent messages
messages = client.agents.messages.list(
agent_id="agent-id",
limit=50,
use_assistant_message=True
)
# Iterate through message types
for message in messages:
if message.message_type == "user_message":
print(f"User: {message.content}")
elif message.message_type == "assistant_message":
print(f"Agent: {message.content}")
elif message.message_type == "reasoning_message":
print(f"Reasoning: {message.reasoning}")
elif message.message_type == "tool_call_message":
print(f"Tool call: {message.tool_call.name}")
# ... handle other types
```
</CodeGroup>
### Filtering Messages by Type
<CodeGroup>
```typescript TypeScript
// Get only assistant messages (what the agent said to the user)
const agentMessages = messages.filter(
(msg) => msg.messageType === "assistant_message"
);
// Get all tool-related messages
const toolMessages = messages.filter(
(msg) => msg.messageType === "tool_call_message" ||
msg.messageType === "tool_return_message"
);
// Get conversation history (user + assistant messages only)
const conversation = messages.filter(
(msg) => msg.messageType === "user_message" ||
msg.messageType === "assistant_message"
);
```
```python Python
# Get only assistant messages (what the agent said to the user)
agent_messages = [
msg for msg in messages
if msg.message_type == "assistant_message"
]
# Get all tool-related messages
tool_messages = [
msg for msg in messages
if msg.message_type in ["tool_call_message", "tool_return_message"]
]
# Get conversation history (user + assistant messages only)
conversation = [
msg for msg in messages
if msg.message_type in ["user_message", "assistant_message"]
]
```
</CodeGroup>
### Filtering Out Special User Messages
When working with user messages, you may want to filter out internal system messages like heartbeats:
<CodeGroup>
```typescript TypeScript
function isHeartbeat(content: string): boolean {
try {
const parsed = JSON.parse(content);
return parsed.type === "heartbeat";
} catch {
return false;
}
}
// Filter out heartbeat messages
const userMessages = messages
.filter((msg) => msg.messageType === "user_message")
.filter((msg) => {
if (typeof msg.content === "string") {
return !isHeartbeat(msg.content);
}
return true;
});
```
```python Python
import json
def is_heartbeat(content: str) -> bool:
try:
parsed = json.loads(content)
return parsed.get("type") == "heartbeat"
except (json.JSONDecodeError, ValueError):
return False
# Filter out heartbeat messages
user_messages = [
msg for msg in messages
if msg.message_type == "user_message" and
(not isinstance(msg.content, str) or not is_heartbeat(msg.content))
]
```
</CodeGroup>
### Pagination
Messages support cursor-based pagination:
<CodeGroup>
```typescript TypeScript
// Get first page
let messages = await client.agents.messages.list("agent-id", {
limit: 100,
});
// Get next page using the last message ID
const lastMessageId = messages[messages.length - 1].id;
const nextPage = await client.agents.messages.list("agent-id", {
limit: 100,
before: lastMessageId,
});
```
```python Python
# Get first page
messages = client.agents.messages.list(
agent_id="agent-id",
limit=100
)
# Get next page using the last message ID
last_message_id = messages[-1].id
next_page = client.agents.messages.list(
agent_id="agent-id",
limit=100,
before=last_message_id
)
```
</CodeGroup>
## Message Metadata Fields
All message types include these common fields:
- **`id`** - Unique identifier for the message
- **`date`** - ISO 8601 timestamp of when the message was created
- **`message_type`** - The discriminator field identifying the message type
- **`name`** - Optional name field (varies by message type)
- **`otid`** - Offline threading ID for message correlation
- **`sender_id`** - The ID of the sender (identity or agent ID)
- **`step_id`** - The step ID associated with this message
- **`is_err`** - Whether this message is part of an error step (debugging only)
- **`seq_id`** - Sequence ID for ordering
- **`run_id`** - The run ID associated with this message
## Best Practices
### 1. Use Type Discriminators
Always check the `message_type` field to safely access type-specific fields:
<CodeGroup>
```typescript TypeScript
if (message.messageType === "tool_call_message") {
// TypeScript now knows message has a toolCall field
console.log(message.toolCall.name);
}
```
```python Python
if message.message_type == "tool_call_message":
# Safe to access tool_call
print(message.tool_call.name)
```
</CodeGroup>
### 2. Handle Special User Messages
When displaying conversations to end users, filter out internal messages:
```python
import json

def is_internal_message(msg):
    """Check if a user message is internal (heartbeat, login, etc.)"""
    if msg.message_type != "user_message":
        return False
    if not isinstance(msg.content, str):
        return False
    try:
        parsed = json.loads(msg.content)
        return parsed.get("type") in ["heartbeat", "login", "system_alert"]
    except (json.JSONDecodeError, AttributeError):
        return False
# Get user-facing messages only
display_messages = [
msg for msg in messages
if not is_internal_message(msg)
]
```
### 3. Track Tool Execution
Match tool calls with their returns using `tool_call_id`:
```python
# Build a map of tool calls to their returns
tool_calls = {
msg.tool_call.tool_call_id: msg
for msg in messages
if msg.message_type == "tool_call_message"
}
tool_returns = {
msg.tool_call_id: msg
for msg in messages
if msg.message_type == "tool_return_message"
}
# Find failed tool calls
for call_id, call_msg in tool_calls.items():
if call_id in tool_returns:
return_msg = tool_returns[call_id]
if return_msg.status == "error":
print(f"Tool {call_msg.tool_call.name} failed:")
print(f" {return_msg.tool_return}")
```
## See Also
- [Heartbeats](/guides/agents/heartbeats) - Understanding heartbeat messages and tool chaining
- [Human-in-the-Loop](/guides/agents/human_in_the_loop) - Using approval messages
- [Streaming Responses](/guides/agents/streaming) - Receiving messages in real-time
- [API Reference](/api-reference/agents/messages/list) - Full API documentation

View File

@@ -5,7 +5,18 @@ slug: guides/agents/messages
## Sending messages
You can send messages to agents from the REST API and the SDK clients:
```python
<CodeGroup>
```typescript TypeScript
// message an agent as a user
const response = await client.sendMessage(
  agentState.id,
"user",
"hello"
);
console.log("Usage", response.usage);
console.log("Agent messages", response.messages);
```
```python Python
# message an agent as a user
response = client.send_message(
agent_id=agent_state.id,
@@ -15,8 +26,20 @@ response = client.send_message(
print("Usage", response.usage)
print("Agent messages", response.messages)
```
</CodeGroup>
You can also send messages with different roles, such as `system`, `assistant`, or `user`:
```python
<CodeGroup>
```typescript TypeScript
// send a system message (non-user role)
const response = await client.sendMessage(
  agentState.id,
"system",
"[system] user has logged in. send a friendly message."
);
console.log("Usage", response.usage);
console.log("Agent messages", response.messages);
```
```python Python
# send a system message (non-user role)
response = client.send_message(
agent_id=agent_state.id,
@@ -26,6 +49,7 @@ response = client.send_message(
print("Usage", response.usage)
print("Agent messages", response.messages)
```
</CodeGroup>
The `response` object contains the following attributes:
* `usage`: The usage of the agent after the message was sent (the prompt tokens, completion tokens, and total tokens)
* `messages`: A list of either `Message` or `LettaMessage` objects, generated by the agent
@@ -44,15 +68,30 @@ The `LettaMessage` object is a simplified version of the `Message` object. Since
#### `Message`
The `Message` object is the raw MemGPT message representation that is persisted in the database. To have the full `Message` data returns, you can set `include_full_message=True`:
<CodeGroup>
```typescript TypeScript
const response = await client.userMessage(
agent_state.id,
"hello!",
true // include_full_message
);
```
```python Python
response = client.user_message(
agent_id=agent_state.id,
message="hello!",
include_full_message=True
)
```
</CodeGroup>
You can convert a raw `Message` object to a list of `LettaMessage` objects:
<CodeGroup>
```typescript TypeScript
// Convert a `Message` object to a `LettaMessage` object
const lettaMessages = message.toLettaMessage();
```
```python Python
# Convert a `Message` object to a `LettaMessage` object
letta_messages = message.to_letta_message()
```
</CodeGroup>

View File

@@ -29,13 +29,22 @@ There are three built-in tools for cross-agent communication:
* and `send_message_to_agents_matching_all_tags` for a "supervisor-worker" pattern
### Messaging another agent (async / no wait)
<CodeGroup>
```typescript TypeScript
// The function signature for the async multi-agent messaging tool
function sendMessageToAgentAsync(
message: string,
otherAgentId: string
): string
```
```python Python
# The function signature for the async multi-agent messaging tool
def send_message_to_agent_async(
message: str,
other_agent_id: str,
) -> str:
```
</CodeGroup>
```mermaid
sequenceDiagram
autonumber
@@ -51,13 +60,22 @@ This tool is **asynchronous**: instead of waiting for a response from the target
The message that is sent to the target agent contains a "message receipt", indicating which agent sent the message, which allows the target agent to reply to the sender (assuming they also have access to the `send_message_to_agent_async` tool).
### Messaging another agent (wait for reply)
<CodeGroup>
```typescript TypeScript
// The function signature for the synchronous multi-agent messaging tool
function sendMessageToAgentAndWaitForReply(
message: string,
otherAgentId: string
): string
```
```python Python
# The function signature for the synchronous multi-agent messaging tool
def send_message_to_agent_and_wait_for_reply(
message: str,
other_agent_id: str,
) -> str:
```
</CodeGroup>
```mermaid
sequenceDiagram
autonumber
@@ -71,13 +89,22 @@ However, this tool is **synchronous**: the agent will wait for a response from t
The response of the target agent is returned in the tool output - if the target agent does not respond, the tool will return a default message indicating that no response was received.
### Messaging a group of agents (supervisor-worker pattern)
<CodeGroup>
```typescript TypeScript
// The function signature for the group broadcast multi-agent messaging tool
function sendMessageToAgentsMatchingAllTags(
message: string,
tags: string[]
): string[]
```
```python Python
# The function signature for the group broadcast multi-agent messaging tool
def send_message_to_agents_matching_all_tags(
message: str,
tags: List[str],
) -> List[str]:
```
</CodeGroup>
```mermaid
sequenceDiagram
autonumber

View File

@@ -12,7 +12,28 @@ You can also write your own agent communication tools by using the Letta API and
Since Letta runs as a service, you can make requests to the server from a custom tool to send messages to other agents via API calls.
Here's a simple example of a tool that sends a message to a specific agent:
<CodeGroup>
```typescript TypeScript
async function customSendMessageToAgent(targetAgentId: string, messageContents: string) {
/**
* Send a message to a specific Letta agent.
*
* @param targetAgentId - The identifier of the target Letta agent.
* @param messageContents - The message to be sent to the target Letta agent.
*/
const { LettaClient } = require('@letta-ai/letta-client');
// TODO: point this to the server where the worker agents are running
const client = new LettaClient({baseUrl: "http://127.0.0.1:8283"});
// message all worker agents async
const response = await client.agents.sendMessageAsync(
targetAgentId,
messageContents
);
}
```
```python Python
def custom_send_message_to_agent(target_agent_id: str, message_contents: str):
"""
Send a message to a specific Letta agent.
@@ -32,9 +53,31 @@ def custom_send_message_to_agent(target_agent_id: str, message_contents: str):
message=message_contents,
)
```
</CodeGroup>
Below is an example of a tool that triggers agents tagged with `worker` to start their tasks:
<CodeGroup>
```typescript TypeScript
async function triggerWorkerAgents() {
/**
* Trigger worker agents to start their tasks, without waiting for a response.
*/
const { LettaClient } = require('@letta-ai/letta-client');
// TODO: point this to the server where the worker agents are running
const client = new LettaClient({baseUrl: "http://127.0.0.1:8283"});
// message all worker agents async
const agents = await client.agents.list({tags: ["worker"]});
for (const agent of agents) {
const response = await client.agents.sendMessageAsync(
agent.id,
"Start my task"
);
}
}
```
```python Python
def trigger_worker_agents():
"""
Trigger worker agents to start their tasks, without waiting for a response.
@@ -51,3 +94,4 @@ def trigger_worker_agents():
message="Start my task",
)
```
</CodeGroup>
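To make a function like this callable by an agent, you'd register it as a custom tool and attach it to an agent. A minimal sketch, assuming the Python SDK's `upsert_from_function` helper (check your SDK version for the exact method name):
```python
from letta_client import Letta

client = Letta(base_url="http://127.0.0.1:8283")

# register the function above as a custom tool (helper name is an assumption)
tool = client.tools.upsert_from_function(func=trigger_worker_agents)

# attach it to a supervisor agent at creation time
supervisor = client.agents.create(
    model="openai/gpt-4o-mini",
    embedding="openai/text-embedding-3-small",
    tools=[tool.name],
)
```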

View File

@@ -26,41 +26,7 @@ In the example code below, we create a shared memory block and attach it to a su
Because the memory block is shared, when one agent writes to it, the other agent can read the updates immediately.
<CodeGroup>
```typescript TypeScript maxLines=50
// install letta-client with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
@@ -96,6 +62,40 @@ const workerAgent = await client.agents.create({
blockIds: [sharedBlock.id]
});
```
```python title="python" maxLines=50
# install letta_client with `pip install letta-client`
from letta_client import Letta
# create a client to connect to Letta
client = Letta(token="LETTA_API_KEY")
# create a shared memory block
shared_block = client.blocks.create(
label="organization",
description="Shared information between all agents within the organization.",
value="Nothing here yet, we should update this over time."
)
# create a supervisor agent
supervisor_agent = client.agents.create(
model="anthropic/claude-3-5-sonnet-20241022",
embedding="openai/text-embedding-3-small",
# blocks created for this agent
memory_blocks=[{"label": "persona", "value": "I am a supervisor"}],
# pre-existing shared block that is "attached" to this agent
block_ids=[shared_block.id],
)
# create a worker agent
worker_agent = client.agents.create(
model="anthropic/claude-3-5-sonnet-20241022",
embedding="openai/text-embedding-3-small",
# blocks created for this agent
memory_blocks=[{"label": "persona", "value": "I am a worker"}],
# pre-existing shared block that is "attached" to this agent
block_ids=[shared_block.id],
)
```
</CodeGroup>
Memory blocks can also be accessed by other agents, even if not shared.
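For example, a minimal sketch of reading the shared block directly by ID, using the same block APIs shown elsewhere in these docs:
```python
# any client (or another agent's custom tool) can read the block directly by ID
block = client.blocks.retrieve(block_id=shared_block.id)
print(block.value)
```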

View File

@@ -33,34 +33,7 @@ You can pass images to your agents by drag-and-dropping them into the chat windo
### Sending an Image via URL
<CodeGroup>
```python title="python" maxLines=100
from letta_client import Letta
client = Letta(token="LETTA_API_KEY")
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
{
"role": "user",
"content": [
{
"type": "image",
"source": {
"type": "url",
"url": "https://upload.wikimedia.org/wikipedia/commons/a/a7/Camponotus_flavomarginatus_ant.jpg",
},
},
{
"type": "text",
"text": "Describe this image."
}
],
}
],
)
```
```typescript TypeScript maxLines=100
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({ token: "LETTA_API_KEY" });
@@ -88,21 +61,11 @@ const response = await client.agents.messages.create(
}
);
```
</CodeGroup>
### Sending an Image via Base64
<CodeGroup>
```typescript TypeScript maxLines=100
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({ token: "LETTA_API_KEY" });
@@ -160,4 +127,37 @@ const response = await client.agents.messages.create(
}
);
```
```python title="python" maxLines=100
import base64
import httpx
from letta_client import Letta
client = Letta(token="LETTA_API_KEY")
image_url = "https://upload.wikimedia.org/wikipedia/commons/a/a7/Camponotus_flavomarginatus_ant.jpg"
image_data = base64.standard_b64encode(httpx.get(image_url).content).decode("utf-8")
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
{
"role": "user",
"content": [
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": image_data,
},
},
{
"type": "text",
"text": "Describe this image."
}
],
}
],
)
```
</CodeGroup>

View File

@@ -51,7 +51,7 @@ agent = client.agents.create(
)
```
```typescript TypeScript
// assumes that you already instantiated a client
const identity = await client.identities.create({
identifierKey: "user_1",
@@ -80,7 +80,7 @@ user_agents = client.agents.list(
identifier_keys=["user_1"]
)
```
```typescript TypeScript
// assumes that you already instantiated a client
await client.agents.list({
identifierKeys: ["user_1"]
@@ -110,7 +110,7 @@ identity = client.identities.create({
agent_ids=["agent-00000000-0000-4000-8000-000000000000"]
})
```
```typescript TypeScript
// assumes that you already instantiated a client
const identity = await client.identities.create({
identifierKey: "user_1",
@@ -122,10 +122,33 @@ const identity = await client.identities.create({
</CodeBlocks>
### Using Agent Tags to Identify Users
It's also possible to utilize our agent tags feature to associate agents with specific users. To associate agents you create in Letta with your users, you can specify a tag when creating an agent, and set the tag to the user's unique ID.
This example assumes that you have a self-hosted Letta server running on localhost (for example, by running [`docker run ...`](/guides/server/docker)).
<Accordion title="View example Python SDK code">
```python title="python"
<Accordion title="View example SDK code">
<CodeGroup>
```typescript TypeScript
import { LettaClient } from '@letta-ai/letta-client';
// in this example we'll connect to a self-hosted Letta server
const client = new LettaClient({baseUrl: "http://localhost:8283"});
const userId = "my_uuid";
// create an agent with the userId tag
const agent = await client.agents.create({
memoryBlocks: [],
model: "anthropic/claude-3-5-sonnet-20241022",
contextWindowLimit: 200000,
embedding: "openai/text-embedding-3-small",
tags: [userId]
});
console.log(`Created agent with id ${agent.id}, tags ${agent.tags}`);
// list agents
const userAgents = await client.agents.list({tags: [userId]});
const agentIds = userAgents.map(agent => agent.id);
console.log(`Found matching agents ${agentIds}`);
```
```python Python
from letta_client import Letta
# in this example we'll connect to a self-hosted Letta server
@@ -147,6 +170,7 @@ user_agents = client.agents.list(tags=[user_id])
agent_ids = [agent.id for agent in user_agents]
print(f"Found matching agents {agent_ids}")
```
</CodeGroup>
</Accordion>
## Creating and Viewing Tags in the ADE

View File

@@ -2,6 +2,11 @@
title: Building Stateful Agents with Letta
slug: guides/agents/overview
---
<Info>
**New to Letta?** If you haven't already, read [Core Concepts](/core-concepts) to understand how Letta's stateful agents are fundamentally different from traditional LLM APIs.
</Info>
Letta agents can automatically manage long-term memory, load data from external sources, and call custom tools.
Unlike in other frameworks, Letta agents are stateful, so they keep track of historical interactions and reserve part of their context to read and write memories which evolve over time.
<img className="light" src="/images/stateful_agents.png" />
@@ -120,7 +125,7 @@ agent_state = client.agents.create(
# the AgentState object contains all the information about the agent
print(agent_state)
```
```typescript TypeScript maxLines=50
// install letta-client with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
@@ -199,7 +204,7 @@ print(response.usage)
for message in response.messages:
print(message)
```
```typescript TypeScript maxLines=50
// send a message to the agent
const response = await client.agents.messages.create(
agentState.id, {

View File

@@ -26,6 +26,20 @@ To create a ReAct agent, simply use the `react_agent` agent type when creating y
There is no need to pass any memory blocks to the agent, since ReAct agents do not have any long-term memory.
<CodeGroup>
```typescript TypeScript
import { LettaClient } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
// create the ReAct agent
const agent = await client.agents.create({
agentType: "react_agent",
model: "openai/gpt-4.1",
embedding: "openai/text-embedding-3-small",
tools: ["web_search", "run_code"]
});
```
```python title="python"
from letta_client import Letta
@@ -40,20 +54,6 @@ agent = client.agents.create(
)
```
```bash title="curl"
curl -X POST https://api.letta.com/v1/agents \
-H "Authorization: Bearer $LETTA_API_KEY" \

View File

@@ -25,6 +25,24 @@ This guide covers simple approaches to implement scheduled agent interactions.
The most straightforward approach for development and testing:
<CodeGroup>
```typescript TypeScript
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({ baseUrl: "http://localhost:8283" });
const agentId = "your_agent_id";
while (true) {
const response = await client.agents.messages.create(agentId, {
messages: [{
role: "user",
content: `Scheduled check at ${new Date()}`
}]
});
console.log(`[${new Date()}] Agent responded`);
await new Promise(resolve => setTimeout(resolve, 300000)); // 5 minutes
}
```
```python title="python"
import time
from letta_client import Letta
@@ -44,24 +62,6 @@ while True:
print(f"[{datetime.now()}] Agent responded")
time.sleep(300) # 5 minutes
```
</CodeGroup>
**Pros:** Simple, easy to debug
@@ -72,26 +72,7 @@ while (true) {
For production deployments, use cron for reliability:
<CodeGroup>
```typescript TypeScript
#!/usr/bin/env node
import { LettaClient } from '@letta-ai/letta-client';
@@ -112,6 +93,25 @@ async function sendMessage() {
sendMessage();
```
```python title="python"
#!/usr/bin/env python3
from letta_client import Letta
from datetime import datetime
try:
client = Letta(base_url="http://localhost:8283")
response = client.agents.messages.create(
agent_id="your_agent_id",
messages=[{
"role": "user",
"content": "Scheduled maintenance check"
}]
)
print(f"[{datetime.now()}] Success")
except Exception as e:
print(f"[{datetime.now()}] Error: {e}")
```
</CodeGroup>
Add to crontab with `crontab -e`:
@@ -136,6 +136,40 @@ Add to crontab with `crontab -e`:
Complete example that performs periodic memory cleanup:
<CodeGroup>
```typescript TypeScript
#!/usr/bin/env node
import { LettaClient } from '@letta-ai/letta-client';
async function runMaintenance() {
try {
const client = new LettaClient({ baseUrl: "http://localhost:8283" });
const agentId = "your_agent_id";
const response = await client.agents.messages.create(agentId, {
messages: [{
role: "user",
content: "Please review your memory blocks for outdated information and clean up as needed."
}]
});
// Print any assistant messages
for (const message of response.messages) {
if (message.messageType === "assistant_message") {
console.log(`Agent response: ${message.content?.substring(0, 100)}...`);
}
}
} catch (error) {
console.error("Maintenance failed:", error);
}
}
// Run if called directly
if (import.meta.url === `file://${process.argv[1]}`) {
runMaintenance();
}
```
```python title="python"
#!/usr/bin/env python3
import logging
@@ -171,40 +205,6 @@ def run_maintenance():
if __name__ == "__main__":
run_maintenance()
```
</CodeGroup>
Choose the scheduling method that best fits your deployment environment. For production systems, cron offers the best reliability, while simple loops are perfect for development and testing.

View File

@@ -4,6 +4,10 @@ subtitle: Based on the new sleep-time compute research paper
slug: guides/agents/architectures/sleeptime
---
<Warning>
Sleep-time agents are experimental and may be unstable. For more information, visit our [Discord](https://discord.gg/letta).
</Warning>
<Note>
To learn more about sleep-time compute, check out our [blog](https://www.letta.com/blog/sleep-time-compute) and [research paper](https://arxiv.org/abs/2504.13171).
</Note>
@@ -26,6 +30,13 @@ In Letta, the learned context is saved in a memory block. A memory block represe
Memory blocks can be accessed directly through the API to be updated, retrieved, or deleted.
<CodeGroup>
```typescript TypeScript
// get a block by label
const block = await client.agents.blocks.retrieve(agentId, "persona");
// get a block by ID
const block = await client.blocks.retrieve(blockId);
```
```python title="python"
# get a block by label
block = client.agents.blocks.retrieve(agent_id=agent_id, block_label="persona")
@@ -33,13 +44,6 @@ block = client.agents.blocks.retrieve(agent_id=agent_id, block_label="persona")
# get a block by ID
block = client.blocks.retrieve(block_id=block_id)
```
</CodeGroup>
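Updating and deleting follow the same pattern. A minimal sketch, assuming the block endpoints use the SDK's usual `modify` and `delete` verbs (as with `client.groups.modify` above):
```python
# update a block's value by agent and label (method name is an assumption)
client.agents.blocks.modify(
    agent_id=agent_id,
    block_label="persona",
    value="You are a concise assistant.",
)

# delete a standalone block by ID (method name is an assumption)
client.blocks.delete(block_id=block_id)
```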
When sleep-time is enabled for an agent, there will be one or more sleep-time agents created to manage the memory blocks of the primary agent. These sleep-time agents will run in the background and can modify the memory blocks of the primary agent asynchronously. One sleep-time agent (created when the primary agent is created) will generate learned context from the conversation history to update the memory blocks of the primary agent. Additional ephemeral sleep-time agents will be created when you add data into data sources of the primary agent to process the data sources in the background. These ephemeral agents will create and write to a block specific to the data source, and be deleted once they are finished processing the data sources.
@@ -57,6 +61,35 @@ When a `sleeptime_agent` is created, a primary agent and a sleep-time agent are
The sleep-time agent will be triggered every N-steps (default `5`) to update the memory blocks of the primary agent. You can configure the frequency of updates by setting the `sleeptime_agent_frequency` parameter when creating the agent.
<CodeGroup>
```typescript TypeScript maxLines=50
import { LettaClient, SleeptimeManagerUpdate } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
// create a sleep-time-enabled agent
const agent = await client.agents.create({
memoryBlocks: [
{ value: "", label: "human" },
{ value: "You are a helpful assistant.", label: "persona" }
],
model: "anthropic/claude-3-7-sonnet-20250219",
embedding: "openai/text-embedding-3-small",
enableSleeptime: true
});
console.log(`Created agent id ${agent.id}`);
// get the multi-agent group
const groupId = agent.multiAgentGroup.id;
const currentFrequency = agent.multiAgentGroup.sleeptimeAgentFrequency;
console.log(`Group id: ${groupId}, frequency: ${currentFrequency}`);
// update the frequency to every 2 steps
const group = await client.groups.modify(groupId, {
managerConfig: {
sleeptimeAgentFrequency: 2
} as SleeptimeManagerUpdate
});
```
```python title="python" maxLines=50
from letta_client import Letta
from letta_client.types import SleeptimeManagerUpdate
@@ -88,127 +121,5 @@ group = client.groups.modify(
),
)
```
</CodeGroup>
We recommend keeping the frequency value relatively high (e.g. `5` or `10`), as triggering the sleep-time agent too often can be expensive (due to high token usage) and has diminishing returns.
## Sleep-time agents for data sources
<img className="light" src="/images/sleeptime_data.png" />
<img className="dark" src="/images/sleeptime_data_dark.png" />
Sleep-time-enabled agents will spawn additional ephemeral sleep-time agents when you add data into data sources of the primary agent to process the data sources in the background. These ephemeral agents will create and write to a block specific to the data source, and be deleted once they are finished processing the data sources.
When a file is uploaded to a data source, it is parsed into passages (chunks of text) which are embedded and saved into the main agent's archival memory. If sleeptime is enabled, the sleep-time agent will also process each passage's text to update the memory block corresponding to the data source. The sleep-time agent will create an `instructions` block that contains the data source description, to help guide the learned context generation.
<img src="/images/sleeptime_data_source.gif" />
<Tip>
Give your data sources an informative `name` and `description` when creating them to help the sleep-time agent generate better learned context, and to help the primary agent understand what the associated memory block is for.
</Tip>
Below is an example of using the SDK to attach a data source to a sleep-time-enabled agent:
<CodeGroup>
```python title="python" maxLines=50
from letta_client import Letta
client = Letta(token="LETTA_API_KEY")
agent = client.agents.create(
memory_blocks=[
{"value": "", "label": "human"},
{"value": "You are a helpful assistant.", "label": "persona"},
],
model="anthropic/claude-3-7-sonnet-20250219",
embedding="openai/text-embedding-3-small",
enable_sleeptime=True,
)
print(f"Created agent id {agent.id}")
# create a source
source_name = "employee_handbook"
source = client.sources.create(
name=source_name,
description="Provides reference information for the employee handbook",
embedding="openai/text-embedding-3-small" # must match agent
)
# attach the source to the agent
client.agents.sources.attach(
source_id=source.id,
agent_id=agent.id
)
# upload a file: this will trigger processing
job = client.sources.files.upload(
file=open("handbook.pdf", "rb"),
source_id=source.id
)
```
```typescript title="node.js" maxLines=50
import { LettaClient } from '@letta-ai/letta-client'
import { readFileSync } from 'fs';
const client = new LettaClient({ token: "LETTA_API_KEY" });
const agent = await client.agents.create({
memoryBlocks: [
{ value: "", label: "human" },
{ value: "You are a helpful assistant.", label: "persona" }
],
model: "anthropic/claude-3-7-sonnet-20250219",
embedding: "openai/text-embedding-3-small",
enableSleeptime: true
});
console.log(`Created agent id ${agent.id}`);
// create a source
const sourceName = "employee_handbook";
const source = await client.sources.create({
name: sourceName,
description: "Provides reference information for the employee handbook",
embedding: "openai/text-embedding-3-small" // must match agent
});
// attach the source to the agent
await client.agents.sources.attach(agent.id, source.id);
// upload a file: this will trigger processing
const file = new Blob([readFileSync("handbook.pdf")]);
const job = await client.sources.files.upload(source.id, file);
```
</CodeGroup>
This code will create and attach a memory block with the label `employee_handbook` to the agent. An ephemeral sleep-time agent will be created to process the data source and write to the memory block, and be deleted once all the passages in the data source have been processed.
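To confirm the processing finished, you can poll the upload job and then read the generated block. A sketch, assuming a `jobs.retrieve` endpoint and a `completed` terminal status (both assumptions; check the API reference for exact names):
```python
import time

# poll until the file has been processed (endpoint and status value are assumptions)
while client.jobs.retrieve(job_id=job.id).status != "completed":
    time.sleep(2)

# the sleep-time agent wrote its learned context to this block
block = client.agents.blocks.retrieve(
    agent_id=agent.id,
    block_label="employee_handbook",
)
print(block.value)
```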
<Warning>
Processing each `Passage` from a data source will invoke many LLM requests by the sleep-time agent, so you should only process relatively small files (a few MB) of data.
</Warning>

View File

@@ -18,25 +18,6 @@ Letta supports two streaming modes: **step streaming** (default) and **token str
To enable streaming, use the [`/v1/agents/{agent_id}/messages/stream`](/api-reference/agents/messages/stream) endpoint instead of `/messages`:
<CodeGroup>
```typescript title="typescript"
import { LettaClient } from '@letta-ai/letta-client';
@@ -63,6 +44,25 @@ for await (const chunk of tokenStream) {
console.log(chunk); // Partial content chunks
}
```
```python title="python"
# Step streaming (default) - returns complete messages
stream = client.agents.messages.create_stream(
agent_id=agent.id,
messages=[{"role": "user", "content": "Hello!"}]
)
for chunk in stream:
print(chunk) # Complete message objects
# Token streaming - returns partial chunks for real-time UX
stream = client.agents.messages.create_stream(
agent_id=agent.id,
messages=[{"role": "user", "content": "Hello!"}],
stream_tokens=True # Enable token streaming
)
for chunk in stream:
print(chunk) # Partial content chunks
```
</CodeGroup>
## Streaming Modes Comparison
@@ -101,7 +101,21 @@ The messages you receive depend on your agent's configuration:
### Controlling Reasoning Messages
<CodeGroup>
```typescript TypeScript
// With reasoning (default) - includes reasoning_message events
const agent = await client.agents.create({
model: "openai/gpt-4o-mini",
// reasoning: true is the default
});
// Without reasoning - no reasoning_message events
const agentNoReasoning = await client.agents.create({
model: "openai/gpt-4o-mini",
reasoning: false // Disable reasoning messages
});
```
```python Python
# With reasoning (default) - includes reasoning_message events
agent = client.agents.create(
model="openai/gpt-4o-mini",
@@ -114,6 +128,7 @@ agent = client.agents.create(
reasoning=False # Disable reasoning messages
)
```
</CodeGroup>
## Step Streaming (Default)
@@ -128,20 +143,6 @@ Step streaming delivers **complete messages** after each agent step completes. T
### Example
<CodeGroup>
```typescript title="typescript"
import { LettaClient } from '@letta-ai/letta-client';
import type { LettaMessage } from '@letta-ai/letta-client/api/types';
@@ -163,6 +164,20 @@ for await (const chunk of stream as AsyncIterable<LettaMessage>) {
}
```
```python title="python"
stream = client.agents.messages.create_stream(
agent_id=agent.id,
messages=[{"role": "user", "content": "What's 2+2?"}]
)
for chunk in stream:
if hasattr(chunk, 'message_type'):
if chunk.message_type == 'reasoning_message':
print(f"Thinking: {chunk.reasoning}")
elif chunk.message_type == 'assistant_message':
print(f"Response: {chunk.content}")
```
```bash title="curl"
curl -N --request POST \
--url https://api.letta.com/v1/agents/$AGENT_ID/messages/stream \
@@ -198,38 +213,6 @@ Token streaming provides **partial content chunks** as they're generated by the
### Example with Reassembly
<CodeGroup>
```typescript title="typescript"
import { LettaClient } from '@letta-ai/letta-client';
import type { LettaMessage } from '@letta-ai/letta-client/api/types';
@@ -282,6 +265,38 @@ for await (const chunk of stream as AsyncIterable<LettaMessage>) {
}
```
```python title="python"
# Token streaming with reassembly
message_accumulators = {}
stream = client.agents.messages.create_stream(
agent_id=agent.id,
messages=[{"role": "user", "content": "Tell me a joke"}],
stream_tokens=True
)
for chunk in stream:
if hasattr(chunk, 'id') and hasattr(chunk, 'message_type'):
msg_id = chunk.id
msg_type = chunk.message_type
# Initialize accumulator for new messages
if msg_id not in message_accumulators:
message_accumulators[msg_id] = {
'type': msg_type,
'content': ''
}
# Accumulate content
if msg_type == 'reasoning_message':
message_accumulators[msg_id]['content'] += chunk.reasoning
elif msg_type == 'assistant_message':
message_accumulators[msg_id]['content'] += chunk.content
# Display accumulated content in real-time
print(message_accumulators[msg_id]['content'], end='', flush=True)
```
```bash title="curl"
curl -N --request POST \
--url https://api.letta.com/v1/agents/$AGENT_ID/messages/stream \
@@ -328,4 +343,4 @@ Learn more about SSE format [here](https://developer.mozilla.org/en-US/docs/Web/
### Handling Different LLM Providers
If your Letta server connects to multiple LLM providers, some may not support token streaming. Your client code will still work - the server will fall back to step streaming automatically when token streaming isn't available.

View File

@@ -59,7 +59,24 @@ Depending on your chosen [agent architecture](/guides/agents/architectures), the
## Tool rule examples
For example, you can ensure that the agent will stop execution if either the `send_message` or `roll_d20` tool is called by specifying tool rules in the agent creation:
<CodeGroup>
```typescript TypeScript {6-11}
// create a new agent
const agentState = await client.createAgent({
// create the agent with an additional tool
tools: [tool.name],
// add tool rules that terminate execution after specific tools
toolRules: [
// exit after roll_d20 is called
{toolName: tool.name, type: "exit_loop"},
// exit after send_message is called (default behavior)
{toolName: "send_message", type: "exit_loop"},
],
});
console.log(`Created agent with name ${agentState.name} with tools ${agentState.tools}`);
```
```python Python {6-11}
# create a new agent
agent_state = client.create_agent(
# create the agent with an additional tool
@@ -75,5 +92,6 @@ agent_state = client.create_agent(
print(f"Created agent with name {agent_state.name} with tools {agent_state.tools}")
```
</CodeGroup>
You can see a full working example of tool rules [here](https://github.com/letta-ai/letta/blob/0.5.2/examples/tool_rule_usage.py).

View File

@@ -28,7 +28,7 @@ curl -X POST http://localhost:8283/v1/agents/ \
"llm":"openai/gpt-4o-mini",
"embedding":"openai/text-embedding-3-small",
"tool_exec_environment_variables": {
"COMPOSIO_ENTITY": "banana"
"API_KEY": "your-api-key-here"
}
}'
```
@@ -38,17 +38,17 @@ agent_state = client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
tool_exec_environment_variables={
"COMPOSIO_ENTITY": "banana"
"API_KEY": "your-api-key-here"
}
)
```
```typescript TypeScript {5-7}
const agentState = await client.agents.create({
memoryBlocks: [],
model: "openai/gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
toolExecEnvironmentVariables: {
"COMPOSIO_ENTITY": "banana"
"API_KEY": "your-api-key-here"
}
});
```

View File

@@ -17,7 +17,20 @@ There are three main ways to connect tools to your agents:
- [**MCP servers**](/guides/mcp/overview): connect your agent to tools that run on external MCP servers.
Once a tool has been created (if it's a custom tool) or connected (if it's a pre-built tool or MCP server), you can add it to an agent by passing the tool name to the `tools` parameter in the agent creation:
<CodeGroup>
```typescript TypeScript {9}
// create a new agent
const agent = await client.agents.create({
memoryBlocks: [
{label: "human", limit: 2000, value: "Name: Bob"},
{label: "persona", limit: 2000, value: "You are a friendly agent"}
],
model: "openai/gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
tools: ["my_custom_tool_name"]
});
```
```python Python {9}
# create a new agent
agent = client.agents.create(
memory_blocks=[
@@ -29,6 +42,7 @@ agent = client.agents.create(
tools=["my_custom_tool_name"]
)
```
</CodeGroup>
## Tool Execution
You can customize the environment that your tool runs in (the Python package dependencies and environment variables) by setting a tool execution environment. See more [here](/guides/agents/tool-variables).
@@ -38,7 +52,22 @@ You can set agent-scoped environment variables for your tools.
These environment variables will be accessible in the sandboxed environment that any of the agent tools are run in.
For example, if you define a custom tool that requires an API key to run (e.g. `EXAMPLE_TOOL_API_KEY`), you can set the variable at time of agent creation by using the `tool_exec_environment_variables` parameter:
<CodeGroup>
```typescript TypeScript {9-11}
// create an agent with no tools
const agent = await client.agents.create({
memoryBlocks: [
{label: "human", limit: 2000, value: "Name: Bob"},
{label: "persona", limit: 2000, value: "You are a friendly agent"}
],
model: "openai/gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
toolExecEnvironmentVariables: {
"EXAMPLE_TOOL_API_KEY": "banana"
}
});
```
```python Python {9-11}
# create an agent with no tools
agent = client.agents.create(
memory_blocks=[
@@ -52,6 +81,7 @@ agent = client.agents.create(
}
)
```
</CodeGroup>
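Inside the sandbox, these variables behave like ordinary process environment variables. A minimal sketch of a hypothetical custom tool reading the key:
```python
def example_tool(query: str) -> str:
    """Call an external service using the agent-scoped API key."""
    import os

    # set via tool_exec_environment_variables at agent creation
    api_key = os.getenv("EXAMPLE_TOOL_API_KEY")
    if api_key is None:
        return "EXAMPLE_TOOL_API_KEY is not set"
    return f"would call the external service for '{query}'"
```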
## Tool Rules

View File

@@ -37,6 +37,39 @@ By default, there are no constraints on the sequence of tool calls that can be m
For example, in the following code snippet, we are creating a workflow agent that can call the `web_search` tool, and then call either the `send_email` or `create_report` tool, based on the LLM's reasoning.
<CodeGroup>
```typescript TypeScript maxLines=50
import { LettaClient } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
// create the workflow agent with tool rules
const agent = await client.agents.create({
agentType: "workflow_agent",
model: "openai/gpt-4.1",
embedding: "openai/text-embedding-3-small",
tools: ["web_search", "send_email", "create_report"],
toolRules: [
{
toolName: "web_search",
type: "run_first"
},
{
toolName: "web_search",
type: "constrain_child_tools",
children: ["send_email", "create_report"]
},
{
toolName: "send_email",
type: "exit_loop"
},
{
toolName: "create_report",
type: "exit_loop"
}
]
});
```
```python title="python" maxLines=50
from letta_client import Letta
@@ -70,39 +103,6 @@ agent = client.agents.create(
)
```
```bash title="curl" maxLines=50
curl -X POST https://api.letta.com/v1/agents \
-H "Authorization: Bearer $LETTA_API_KEY" \

View File

@@ -28,14 +28,14 @@ API keys are sensitive and should be stored in a safe location.
Once you've created an API key, you can use it with any of the Letta SDKs or framework integrations.
For example, if you're using the Python or TypeScript (Node.js) SDK, you should set the `token` in the client to be your key (replace `LETTA_API_KEY` with your actual API key):
<CodeGroup>
```typescript TypeScript maxLines=50
import { LettaClient } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
```
```python title="python" maxLines=50
from letta_client import Letta
client = Letta(token="LETTA_API_KEY")
```
</CodeGroup>

View File

@@ -47,25 +47,7 @@ flowchart TD
## Creating client-side access tokens
<CodeGroup>
```typescript TypeScript maxLines=50
import { LettaClient } from "@letta-ai/letta-client";
// Initialize the client
@@ -86,6 +68,24 @@ await client.clientSideAccessTokens.create({
hostname: "hostname",
});
```
```python title="python" maxLines=50
from letta_client import Letta
# Initialize the client
client = Letta(token="YOUR_TOKEN", project="YOUR_PROJECT")
# Create the token
client.client_side_access_tokens.create(
policy=[
{
"type": "agent",
"id": "id",
"access": ["read_messages"],
}
],
hostname="hostname",
)
```
</CodeGroup>
## Token policy configuration
@@ -109,6 +109,13 @@ Client-side access tokens automatically expire for enhanced security. The defaul
You can specify a custom expiration time using the `expires_at` parameter:
<CodeGroup>
```typescript TypeScript maxLines=50
const clientToken = await client.clientSideAccessTokens.create({
policy: [/* ... */],
hostname: "https://your-app.com",
expires_at: "2024-12-31T23:59:59Z", // Optional, ISO 8601 format
});
```
```python title="python" maxLines=50
client = Letta(token="YOUR_TOKEN", project="YOUR_PROJECT")
client_token = client.client_side_access_tokens.create(
@@ -117,13 +124,6 @@ client_token = client.client_side_access_tokens.create(
expires_at="2024-12-31T23:59:59Z", # Optional, ISO 8601 format
)
```
</CodeGroup>
## Security considerations
@@ -135,13 +135,13 @@ When implementing client-side access tokens, it's important to follow security b
You can delete client-side access tokens when they're no longer needed:
<CodeGroup>
```typescript TypeScript maxLines=50
await client.clientSideAccessTokens.delete("ck-let-token-value");
```
```python title="python" maxLines=50
client = Letta(token="YOUR_TOKEN", project="YOUR_PROJECT")
client.client_side_access_tokens.delete("ck-let-token-value")
```
</CodeGroup>
## Example use case: multi-user chat application
@@ -149,37 +149,7 @@ await client.clientSideAccessTokens.delete("ck-let-token-value");
Here's how you might implement client-side access tokens in a multi-user chat application:
<CodeGroup>
```typescript TypeScript maxLines=50
// Server-side: Create user-specific tokens when users log in
async function createUserToken(userId: string, agentId: string) {
const clientToken = await client.clientSideAccessTokens.create({
@@ -213,6 +183,36 @@ const response = await userClient.agents.messages.create(agentId, {
],
});
```
```python title="python" maxLines=50
from datetime import datetime, timedelta

# Server-side: Create user-specific tokens when users log in
def create_user_token(user_id: str, agent_id: str):
client_token = client.client_side_access_tokens.create(
policy=[
{
"type": "agent",
"id": agent_id,
"access": ["read_messages", "write_messages"],
}
],
hostname="https://chat.yourapp.com",
expires_at=(datetime.now() + timedelta(hours=24)).isoformat(), # 24 hours
)
return client_token.token
# Client-side: Use the token to communicate directly with the agent
user_client = Letta(token=user_token, project="YOUR_PROJECT") # Received from your backend
# Send messages directly to the agent
response = user_client.agents.messages.create(
agent_id=agent_id,
messages=[
{
"role": "user",
"content": "Hello, agent!",
}
],
)
```
</CodeGroup>
This approach eliminates the need for server-side API proxying while maintaining secure, isolated access for each user.

View File

@@ -9,14 +9,14 @@ slug: concepts
**[Letta](https://letta.com)** was created by the same team that created **[MemGPT](https://research.memgpt.ai)**.
**MemGPT is a _research paper_** that introduced the idea of self-editing memory in LLMs as well as other "LLM OS" concepts.
To understand the key ideas behind the MemGPT paper, see our [MemGPT concepts guide](/concepts/memgpt).
MemGPT also refers to a particular **agent architecture** popularized by the research paper and its open-source implementation, where the agent has a particular set of memory tools that make it particularly useful for long-range chat applications and document search.
**Letta is a _framework_** that allows you to build complex agents (such as MemGPT agents, or even more complex agent architectures) and run them as **services** behind REST APIs.
The **Letta Cloud platform** allows you to easily build and scale agent deployments to power production applications.
The **Letta ADE** (Agent Developer Environment) is an application for agent developers that makes it easy to design and debug complex agents.
## Agents ("LLM agents")
@@ -24,7 +24,7 @@ Agents are LLM processes which can:
1. Have internal **state** (i.e. memory)
2. Take **actions** to modify their state
3. Run **autonomously**
@@ -43,7 +43,7 @@ It includes the "agent runtime", which manages the execution of functions reques
In Letta, all state is *persisted* by default. This means that each time the LLM is run, the agent's state, such as its memories, message history, and tools, is persisted to a DB backend.
Because all state is persisted, you can always re-load agents, tools, sources, etc. at a later point in time.
You can also load the same agent across multiple machines or services, as long as they can connect to the same DB backend.
## Agent microservices ("agents-as-a-service")
Letta follows the model of treating agents as individual services. That is, you interact with agents through a REST API:
@@ -52,7 +52,7 @@ POST /agents/{agent_id}/messages
```
Since agents are designed to be services, they can be *deployed* and connected to external applications.
For example, if you want to create a personalized chatbot, you can create an agent per-user, where each agent has its own custom memory about the individual user.
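A minimal sketch of that pattern (the model, block values, and helper name are illustrative):
```python
from letta_client import Letta

client = Letta(token="LETTA_API_KEY")

def create_agent_for_user(user_id: str, user_name: str):
    # one agent per user, each with its own memory about that user
    return client.agents.create(
        memory_blocks=[
            {"label": "human", "value": f"Name: {user_name}"},
            {"label": "persona", "value": "You are a personalized assistant."},
        ],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
        tags=[user_id],  # tag the agent so it can be found later
    )
```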
## Stateful vs stateless APIs
`ChatCompletions` is the standard for interacting with LLMs as a service. Since it is a stateless API (no notion of sessions or identity across requests, and no state management on the server-side), client-side applications must manage things like agent memory, user personalization, and message history, and translate this state back into the `ChatCompletions` API format. Letta's APIs are designed to be *stateful*, so that this state management is done on the server, not the client.
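In practice, this means a Letta client only ever sends the new message. A minimal sketch (the agent ID is illustrative):
```python
from letta_client import Letta

client = Letta(token="LETTA_API_KEY")
agent_id = "agent-00000000-0000-4000-8000-000000000000"  # an existing agent

# send only the new message; the server already holds the history and memory
response = client.agents.messages.create(
    agent_id=agent_id,
    messages=[{"role": "user", "content": "What did I say earlier?"}],
)
# with a stateless ChatCompletions API, the client would have to resend
# the full conversation history with every request
```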

View File

@@ -1,58 +1,124 @@
---
title: Research Background
subtitle: The academic foundations of Letta
slug: concepts/letta
---
<Info>
**Looking for practical concepts?** See [Core Concepts](/core-concepts) for understanding how to build with Letta's stateful agents.
</Info>
## Letta and MemGPT
**[Letta](https://letta.com)** was created by the same team that created **[MemGPT](https://research.memgpt.ai)**.
### MemGPT: The Research Paper
**MemGPT is a research paper** ([arXiv:2310.08560](https://arxiv.org/abs/2310.08560)) that introduced foundational concepts for building stateful LLM agents:
- **Self-editing memory** - LLMs using tools to edit their own context window and external storage
- **LLM Operating System** - Infrastructure layer managing agent state, memory, and execution
- **Memory hierarchy** - Distinguishing between in-context memory (core) and out-of-context memory (archival)
- **Context window management** - Intelligent paging and memory consolidation techniques
The paper demonstrated that LLMs could maintain coherent conversations far beyond their context window limits by actively managing their own memory through tool calling.
## Agents ("LLM agents")
Agents are LLM processes which can:
[Read the full MemGPT paper →](https://arxiv.org/abs/2310.08560)
### MemGPT: The Agent Architecture
MemGPT also refers to a **specific agent architecture** popularized by the research paper. A MemGPT agent has:
- Memory editing tools (`memory_replace`, `memory_insert`, `memory_rethink`)
- Archival memory tools (`archival_memory_insert`, `archival_memory_search`)
- Conversation search tools (`conversation_search`, `conversation_search_date`)
- A structured context window with persona and human memory blocks
This architecture makes MemGPT agents particularly effective for long-range chat applications, document search, and personalized assistants.
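A sketch of creating an agent with this architecture through the SDK. The `memgpt_agent` type name is an assumption (mirroring types like `react_agent`); see the linked guide for the exact parameters:
```python
from letta_client import Letta

client = Letta(token="LETTA_API_KEY")

# create an agent using the MemGPT-style architecture
agent = client.agents.create(
    agent_type="memgpt_agent",  # assumed type name, mirroring "react_agent"
    memory_blocks=[
        {"label": "human", "value": "Name: Bob"},
        {"label": "persona", "value": "You are a helpful assistant."},
    ],
    model="openai/gpt-4o-mini",
    embedding="openai/text-embedding-3-small",
)
```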
[Learn more about MemGPT agents →](/guides/agents/memgpt-agents)
### Letta: The Framework
**Letta is a production framework** that allows you to build and deploy agents with MemGPT-style memory systems (and beyond) as **services** behind REST APIs.
While the MemGPT research focused on the agent architecture and memory system, Letta provides:
- **Production infrastructure** - Database backends, persistence, state management
- **Agent runtime** - Tool execution, reasoning loops, multi-agent orchestration
- **Developer tools** - Agent Development Environment (ADE), SDKs, monitoring
- **Deployment options** - Letta Cloud for managed hosting, or self-hosted with Docker
- **Flexibility** - Build MemGPT agents, or design custom agent architectures with different memory systems
**In short:**
- **MemGPT (research)** = Ideas about how agents should manage memory
- **MemGPT (architecture)** = Specific agent design with memory tools
- **Letta (framework)** = Production system for building and deploying stateful agents
## Agents in Context
For example, you want to create a personalizated chatbot, you can create an agent per-user, where each agent has its own custom memory about the individual user.
The concept of "agents" has a long history across multiple fields:
## Stateful vs stateless APIs
`ChatCompletions` is the standard for interacting with LLMs as a service. Since it is a stateless API (no notion of sessions or identify accross requests, and no state management on the server-side), client-side applications must manage things like agent memory, user personalization, and message history, and translate this state back into the `ChatCompletions` API format. Letta's APIs are designed to be *stateful*, so that this state management is done on the server, not the client.
**In reinforcement learning and AI**, agents are entities that:
1. Perceive their environment through sensors
2. Make decisions based on internal state
3. Take actions that affect their environment
4. Learn from outcomes to improve future decisions
**In economics and game theory**, agents are autonomous decision-makers with their own objectives and strategies.
**In LLMs**, agents extend these concepts by using language models for reasoning and tool calling for actions. Letta's approach emphasizes:
- **Statefulness** - Persistent memory and identity across sessions
- **Autonomy** - Self-directed memory management and multi-step reasoning
- **Tool use** - Modifying internal state and accessing external resources
## LLM Operating System
The **LLM OS** is the infrastructure layer that manages agent execution and state. This concept, introduced in the MemGPT paper, draws an analogy to traditional operating systems:
Just as an OS manages memory, processes, and I/O for programs, the LLM OS manages:
- **Memory layer** - Context window management, paging, and persistence
- **Agent runtime** - Tool execution and the reasoning loop
- **Stateful layer** - Coordination across database, cache, and execution
Letta implements this LLM OS architecture, providing the infrastructure for stateful agent services.
## Self-Editing Memory
A key innovation from the MemGPT research is **self-editing memory** - agents that actively manage their own memory using tools.
Traditional RAG systems passively retrieve documents. Letta agents actively:
- **Edit in-context memory** - Update memory blocks based on learned information
- **Manage archival storage** - Decide what facts to persist long-term
- **Search strategically** - Query their memory when relevant context is needed
This active memory management enables agents to learn and evolve through interactions rather than requiring retraining or prompt engineering.
[Learn more about Letta's memory system →](/guides/agents/memory)
## Further Reading
<CardGroup cols={2}>
<Card
title="Core Concepts"
href="/core-concepts"
>
Practical guide to building with stateful agents
</Card>
<Card
title="MemGPT Research Details"
href="/concepts/memgpt"
>
Deep dive into the MemGPT paper's technical contributions
</Card>
<Card
title="Agent Memory System"
href="/guides/agents/memory"
>
How agents manage memory in Letta
</Card>
<Card
title="MemGPT Agents"
href="/guides/agents/memgpt-agents"
>
Build agents with the MemGPT architecture
</Card>
</CardGroup>
View File
@@ -86,7 +86,7 @@ If the Letta server is not password protected, we can omit the `X-BARE-PASSWORD`
### Adding additional environment variables
To help you get started, when you deploy the template you have the option to fill in the example environment variables `OPENAI_API_KEY` (to connect your Letta agents to GPT models), `ANTHROPIC_API_KEY` (to connect your Letta agents to Claude models), and `COMPOSIO_API_KEY` (to connect your Letta agents to [Composio's library of over 7k pre-made tools](/guides/agents/composio)).
To help you get started, when you deploy the template you have the option to fill in the example environment variables `OPENAI_API_KEY` (to connect your Letta agents to GPT models) and `ANTHROPIC_API_KEY` (to connect your Letta agents to Claude models).
There are many more providers you can enable on the Letta server via additional environment variables (for example vLLM, Ollama, etc). For more information on available providers, see [our documentation](/guides/server/docker).
View File
@@ -0,0 +1,274 @@
---
title: Core Concepts
subtitle: Understanding what makes Letta different
slug: core-concepts
---
## The Fundamental Limitation of LLMs
Large language models are **stateless by design**. An LLM's knowledge comes from two sources:
1. **Model weights** - Fixed after training
2. **Context window** - Ephemeral input provided at inference time
This means LLMs have no persistent memory between interactions. Each API call starts from scratch, with no ability to learn from past experiences or maintain state across sessions.
## What are Stateful Agents?
**Stateful agents overcome this limitation by maintaining persistent memory and identity across all interactions.**
A stateful agent has:
- **Persistent identity** - Exists as a unique entity with continuity across sessions
- **Active memory formation** - Autonomously decides what information to store and update
- **Accumulated state** - Learns through experience rather than just model weights
- **Long-term context** - Maintains knowledge beyond single conversation windows
Unlike traditional LLM applications where your code manages state, stateful agents **actively manage their own memory** using built-in tools to read, write, and search their persistent storage.
### Why Statefulness Matters
Traditional LLM applications are **stateless** - every interaction starts from scratch. Your application must:
- Store all conversation history in your own database
- Send the entire context with every API call
- Implement memory and personalization logic yourself
- Manually manage context window limits
**With Letta's stateful agents, all of this is handled for you.** The agent maintains its own persistent state, intelligently manages its context window, and learns from every interaction without requiring you to build a complex state management layer.
## Stateful vs Stateless APIs
The difference between stateful agents and traditional LLM APIs is fundamental:
**Traditional APIs (stateless):** No memory between requests. Your app manages everything.
**Letta (stateful):** Agents maintain their own persistent state. You only send new messages.
### Traditional Stateless API
With stateless APIs, there is no state persistence between requests. The client must send the entire conversation history with every call.
```mermaid
flowchart LR
Client["Client Application"]
API["LLM API<br/>(OpenAI, Anthropic, etc)"]
Client -->|"Send: msg1"| API
API -->|"Return: response1"| Client
```
The client must send the full conversation history with each request:
- Request 2: `[msg1, response1, msg2]`
- Request 3: `[msg1, response1, msg2, response2, msg3]`
### Letta Stateful API
Letta maintains agent state on the server and persists it to a database. Clients only send new messages, and the server handles all state management.
```mermaid
flowchart LR
Client["Client Application"]
Server["Letta Server"]
DB[("Persistent<br/>Database")]
Client -->|"Send: msg1"| Server
Server <-->|"Load/Save State"| DB
Server -->|"Return: response1"| Client
```
The client only sends new messages:
- Request 2: `[msg2]`
- Request 3: `[msg3]`
### Key Differences
| Aspect | Traditional (Stateless) | Letta (Stateful) |
|--------|------------------------|------------------|
| **State management** | Client-side | Server-side |
| **Request format** | Send full conversation history | Send only new messages |
| **Memory** | None (ephemeral) | Persistent database |
| **Context limit** | Hard limit, then fails | Intelligent management |
| **Agent identity** | None | Each agent has unique ID |
| **Long conversations** | Expensive & brittle | Scales to arbitrarily long histories |
| **Personalization** | App must manage | Built-in memory blocks |
| **Multi-session** | Requires external DB | Native support |
### Code Comparison
**Stateless API (e.g., OpenAI):**
```python
# You must send the entire conversation every time
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

messages = [
    {"role": "user", "content": "Hello, I'm Sarah"},
    {"role": "assistant", "content": "Hi Sarah!"},
    {"role": "user", "content": "What's my name?"},  # ← New message
]

# Send everything
response = client.chat.completions.create(
    model="gpt-4",
    messages=messages  # ← Full history required
)

# You must store and manage messages yourself
messages.append(
    {"role": "assistant", "content": response.choices[0].message.content}
)
```
**Stateful API (Letta):**
```python
# Agent already knows context
response = client.agents.messages.create(
    agent_id=agent.id,
    messages=[
        {"role": "user", "content": "What's my name?"}  # ← New message only
    ]
)

# Agent remembers Sarah from its memory blocks
# No need to send previous messages
```
## Agents as Services
**Letta treats agents as persistent services, not ephemeral library calls.**
In traditional frameworks, agents are objects that live in your application's memory and disappear when your app stops. In Letta, agents are **independent services** that:
- Continue to exist when your application isn't running
- Maintain state in a database
- Can be accessed from multiple applications simultaneously
- Run autonomously on the server
You interact with Letta agents through REST APIs:
```
POST /agents/{agent_id}/messages
```
This architecture enables:
- **Multi-user applications** - Each user gets their own persistent agent
- **Agent-to-agent communication** - Agents can message each other
- **Background processing** - Agents can continue working while your app is offline
- **Deployment flexibility** - Scale agents independently from your application
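For example, any process that can reach the Letta server can message the same agent by its ID. A minimal sketch using the Python SDK (the agent ID below is a placeholder):
```python
import os

from letta_client import Letta

# The agent lives on the Letta server, not in this process - any application
# with network access and credentials can talk to it.
client = Letta(token=os.getenv("LETTA_API_KEY"))

response = client.agents.messages.create(
    agent_id="agent-00000000-0000-0000-0000-000000000000",  # placeholder ID
    messages=[{"role": "user", "content": "Status update, please."}],
)
for message in response.messages:
    print(message)
```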
## Persistence by Default
In Letta, **all state is persisted automatically**:
- Agent memory (both memory blocks and archival)
- Message history
- Tool configurations
- Agent state and context
Because everything is persisted:
- Agents can be paused and resumed at any time
- You can reload agents across different machines
- State is never lost due to application restarts
- Long conversations don't degrade performance
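Because nothing needs to live in application memory, a brand-new process started later can reload the same agent. A minimal sketch (the agent ID is a placeholder, and this assumes the SDK's `agents.retrieve` and `agents.messages.list` endpoints):
```python
import os

from letta_client import Letta

client = Letta(token=os.getenv("LETTA_API_KEY"))

# Reload an agent created earlier, possibly by a different process entirely
agent = client.agents.retrieve(agent_id="agent-00000000-0000-0000-0000-000000000000")
print(agent.name)

# The message history was persisted server-side along with the agent
messages = client.agents.messages.list(agent_id=agent.id)
print(f"{len(messages)} messages on record")
```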
## Self-Editing Memory
Unlike RAG systems that passively retrieve documents, **Letta agents actively manage their own memory**. Agents use built-in tools to:
- Edit their memory blocks when learning new information
- Insert facts into archival memory for long-term storage
- Search their past conversations when context is needed
This enables agents to:
- Learn user preferences over time
- Maintain consistent personality across sessions
- Build long-term relationships with users
- Continuously improve from interactions
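You can watch this happen from the outside by reading a memory block before and after a message. A minimal sketch, assuming the SDK exposes `agents.blocks.retrieve` and the agent has a block labeled `human` (the agent ID is a placeholder):
```python
import os

from letta_client import Letta

client = Letta(token=os.getenv("LETTA_API_KEY"))
agent_id = "agent-00000000-0000-0000-0000-000000000000"  # placeholder ID

# Inspect the agent's "human" memory block before the conversation
before = client.agents.blocks.retrieve(agent_id=agent_id, block_label="human")
print(before.value)

# Tell the agent something worth remembering
client.agents.messages.create(
    agent_id=agent_id,
    messages=[{"role": "user", "content": "I moved to Berlin last month."}],
)

# The agent may have rewritten the block itself using its memory tools
after = client.agents.blocks.retrieve(agent_id=agent_id, block_label="human")
print(after.value)
```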
[Learn more about memory →](/guides/agents/memory)
## Agents vs Threads
Letta doesn't have the concept of **threads** or **sessions**. Instead, there are only **stateful agents** with a single perpetual message history.
```mermaid
%%{init: {'flowchart': {'rankDir': 'LR'}}}%%
flowchart LR
subgraph Traditional["Thread-Based Agents"]
direction TB
llm1[LLM] --> thread1["Thread 1
--------
Ephemeral
Session"]
llm1 --> thread2["Thread 2
--------
Ephemeral
Session"]
llm1 --> thread3["Thread 3
--------
Ephemeral
Session"]
end
Traditional ~~~ Letta
subgraph Letta["Letta Stateful Agents"]
direction TB
llm2[LLM] --> agent["Single Agent
--------
Persistent Memory"]
agent --> db[(PostgreSQL)]
db -->|"Learn & Update"| agent
end
class thread1,thread2,thread3 session
class agent agent
```
**Why no threads?** Letta is built on the principle that **all interactions should be part of persistent memory**, not ephemeral sessions. This enables:
- Continuous learning across all conversations
- True long-term memory and relationships
- No context loss when "starting a new thread"
For multi-user applications, we recommend **creating one agent per user**. Each agent maintains its own persistent memory about that specific user.
If you need conversation templates or starting points, use [agent templates](/guides/cloud/templates) to create new agents with pre-configured state.
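One way to implement the per-user pattern is to key agents by a user tag. The sketch below assumes your SDK version supports the `tags` parameter on `agents.create` and `agents.list`; the tag convention itself is illustrative:
```python
import os

from letta_client import Letta

client = Letta(token=os.getenv("LETTA_API_KEY"))

def get_or_create_agent(user_id: str):
    # One persistent agent per user, instead of one thread per session
    tag = f"user:{user_id}"
    existing = client.agents.list(tags=[tag])
    if existing:
        return existing[0]
    return client.agents.create(
        model="openai/gpt-4.1",
        embedding="openai/text-embedding-3-small",
        memory_blocks=[{"label": "human", "value": f"User ID: {user_id}"}],
        tags=[tag],
    )

agent = get_or_create_agent("user-123")  # returns the same agent on every visit
```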
## LLM OS
The **LLM Operating System** is the infrastructure layer that manages agent execution, state, and memory. This includes:
- **Agent runtime** - Manages tool execution and the reasoning loop
- **Memory layer** - Handles context window management and persistence
- **Stateful layer** - Coordinates state across database, cache, and execution
Letta's architecture is inspired by the [MemGPT research paper](https://arxiv.org/abs/2310.08560), which introduced these concepts.
## Beyond Model Size
The path to more capable AI systems isn't just about larger models or longer context windows. Stateful agents represent a fundamental shift: agents that learn through accumulated experience, build lasting relationships with users, and continuously improve without retraining.
With stateful agents, you can build:
- **Personalized assistants** that adapt to individual users over time
- **Learning systems** that improve from feedback and interactions
- **Long-term relationships** where agents develop deep context about users and tasks
- **Autonomous services** that operate independently and maintain their own knowledge
This architectural shift—from stateless function calls to stateful agent services—enables a new class of AI applications that weren't possible with traditional LLM APIs.
## Next Steps
<CardGroup cols={2}>
<Card
title="Build Your First Agent"
href="/quickstart"
>
Create a stateful agent with the Letta API
</Card>
<Card
title="Understanding Memory"
href="/guides/agents/memory"
>
Learn how agents manage their memory
</Card>
<Card
title="Agent Overview"
href="/guides/agents/overview"
>
Deep dive into Letta's agent architecture
</Card>
<Card
title="MemGPT Research"
href="/concepts/memgpt"
>
Read about the research behind Letta
</Card>
</CardGroup>
View File
@@ -4,7 +4,7 @@ subtitle: Ready-to-go prompts to help AI coding tools build on Letta
slug: prompts
---
Are you developing an application on Letta using [ChatGPT](https://chatgpt.com), [Cursor](https://cursor.com), [Loveable](https://lovable.dev/), or another AI tool?
Are you developing an application on Letta using [ChatGPT](https://chatgpt.com), [Cursor](https://cursor.com), [Lovable](https://lovable.dev/), or another AI tool?
Use our pre-made prompts to teach your AI how to use Letta properly.
## General instructions for the Letta SDKs
View File
@@ -10,55 +10,52 @@ Programming with AI tools like Cursor? Copy our [pre-built prompts](/prompts) to
This guide will show you how to create a Letta agent with the Letta APIs or SDKs (Python/Typescript). To create agents with a low-code UI, see our [ADE quickstart](/guides/ade/overview).
## Why Letta?
Unlike traditional LLM APIs, where you manually manage conversation history and state, Letta agents maintain their own persistent memory. You only send new messages; the agent remembers past conversations without you storing or retrieving anything yourself. This enables agents that truly learn and evolve over time.
<Steps>
<Step title="Prerequisites">
1. Create a [Letta Cloud account](https://app.letta.com)
2. Create a [Letta Cloud API key](https://app.letta.com/api-keys)
<img className="w-300" src="/images/letta_cloud_api_key_gen.png" />
3. Set your API key as an environment variable:
<CodeGroup>
```sh Python
export LETTA_API_KEY="your-api-key-here"
```
```sh TypeScript
export LETTA_API_KEY="your-api-key-here"
```
</CodeGroup>
<Info>
You can also **self-host** a Letta server. Check out our [self-hosting guide](/guides/selfhosting).
</Info>
</Step>
<Step title="Install the Letta SDK">
<CodeGroup>
```sh title="python" maxLines=50
```sh Python maxLines=50
pip install letta-client
```
```sh maxLines=50 title="node.js"
```sh TypeScript maxLines=50
npm install @letta-ai/letta-client
```
</CodeGroup>
</Step>
<Step title="Create an agent">
Agents in Letta have two key components:
- **Memory blocks**: Persistent context that's always visible to the agent (like a persona and information about the user)
- **Tools**: Actions the agent can take (like searching the web or running code)
<CodeGroup>
```python title="python" maxLines=50
from letta_client import Letta
client = Letta(token="LETTA_API_KEY")
agent_state = client.agents.create(
    model="openai/gpt-4.1",
    embedding="openai/text-embedding-3-small",
    memory_blocks=[
        {
            "label": "human",
            "value": "The human's name is Chad. They like vibe coding."
        },
        {
            "label": "persona",
            "value": "My name is Sam, the all-knowing sentient AI."
        }
    ],
    tools=["web_search", "run_code"]
)
print(agent_state.id)
```
```typescript maxLines=50 title="node.js"
```typescript TypeScript maxLines=50
import { LettaClient } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
const client = new LettaClient({ token: process.env.LETTA_API_KEY });
const agentState = await client.agents.create({
model: "openai/gpt-4.1",
@@ -66,11 +63,11 @@ const agentState = await client.agents.create({
memoryBlocks: [
{
label: "human",
value: "The human's name is Chad. They like vibe coding."
value: "Name: Timber. Status: dog. Occupation: building Letta, infrastructure to democratize self-improving superintelligence"
},
{
label: "persona",
value: "My name is Sam, the all-knowing sentient AI."
value: "I am a self-improving superintelligence. Timber is my best friend and collaborator."
}
],
tools: ["web_search", "run_code"]
@@ -78,6 +75,30 @@ const agentState = await client.agents.create({
console.log(agentState.id);
```
```python Python maxLines=50
from letta_client import Letta
import os
client = Letta(token=os.getenv("LETTA_API_KEY"))
agent_state = client.agents.create(
    model="openai/gpt-4.1",
    embedding="openai/text-embedding-3-small",
    memory_blocks=[
        {
            "label": "human",
            "value": "Name: Timber. Status: dog. Occupation: building Letta, infrastructure to democratize self-improving superintelligence"
        },
        {
            "label": "persona",
            "value": "I am a self-improving superintelligence. Timber is my best friend and collaborator."
        }
    ],
    tools=["web_search", "run_code"]
)
print(agent_state.id)
```
```curl curl
curl -X POST https://api.letta.com/v1/agents \
-H "Authorization: Bearer $LETTA_API_KEY" \
@@ -88,11 +109,11 @@ curl -X POST https://api.letta.com/v1/agents \
"memory_blocks": [
{
"label": "human",
"value": "The human'\''s name is Chad. They like vibe coding."
"value": "Name: Timber. Status: dog. Occupation: building Letta, infrastructure to democratize self-improving superintelligence"
},
{
"label": "persona",
"value": "My name is Sam, the all-knowing sentient AI."
"value": "I am a self-improving superintelligence. Timber is my best friend and collaborator."
}
],
"tools": ["web_search", "run_code"]
@@ -108,27 +129,13 @@ For more information on streaming, see [our streaming guide](/guides/agents/stre
Once the agent is created, we can send the agent a message using its `id` field:
<CodeGroup>
```python title="python" maxLines=50
response = client.agents.messages.create(
    agent_id=agent_state.id,
    messages=[
        {
            "role": "user",
            "content": "hows it going????"
        }
    ]
)
for message in response.messages:
    print(message)
```
```typescript maxLines=50 title="node.js"
```typescript TypeScript maxLines=50
const response = await client.agents.messages.create(
agentState.id, {
messages: [
{
role: "user",
content: "hows it going????"
content: "What do you know about me?"
}
]
}
@@ -138,6 +145,20 @@ for (const message of response.messages) {
console.log(message);
}
```
```python title="python" maxLines=50
response = client.agents.messages.create(
    agent_id=agent_state.id,
    messages=[
        {
            "role": "user",
            "content": "What do you know about me?"
        }
    ]
)
for message in response.messages:
    print(message)
```
```curl curl
curl --request POST \
--url https://api.letta.com/v1/agents/$AGENT_ID/messages \
@@ -147,7 +168,7 @@ curl --request POST \
"messages": [
{
"role": "user",
"content": "hows it going????"
"content": "What do you know about me?"
}
]
}'
@@ -162,23 +183,26 @@ The response contains the agent's full response to the message, which includes r
"id": "message-29d8d17e-7c50-4289-8d0e-2bab988aa01e",
"date": "2024-12-12T17:05:56+00:00",
"message_type": "reasoning_message",
"reasoning": "User seems curious and casual. Time to engage!"
"reasoning": "Timber is asking what I know. I should reference my memory blocks."
},
{
"id": "message-29d8d17e-7c50-4289-8d0e-2bab988aa01e",
"date": "2024-12-12T17:05:56+00:00",
"message_type": "assistant_message",
"content": "Hey there! I'm doing great, thanks for asking! How about you?"
"content": "I know you're Timber, a dog who's building Letta - infrastructure to democratize self-improving superintelligence. We're best friends and collaborators!"
}
],
"usage": {
"completion_tokens": 56,
"prompt_tokens": 2030,
"total_tokens": 2086,
"completion_tokens": 67,
"prompt_tokens": 2134,
"total_tokens": 2201,
"step_count": 1
}
}
```
Notice how the agent retrieved information from its memory blocks without you having to send the context. This is the key difference from traditional LLM APIs where you'd need to include the full conversation history with every request.
You can read more about the response format from the message route [here](/guides/agents/overview#message-types).
</Step>
@@ -197,7 +221,7 @@ You can read more about the response format from the message route [here](/guide
## Next steps
Congratulations! 🎉 You just created and messaged your first stateful agent with Letta, using both the Letta ADE, API, and Python/Typescript SDKs. See the following resources for next steps for building more complex agents with Letta:
Congratulations! 🎉 You just created and messaged your first stateful agent with Letta using the API and SDKs. See the following resources for next steps for building more complex agents with Letta:
* Create and attach [custom tools](/guides/agents/custom-tools) to your agent
* Customize agentic [memory management](/guides/agents/memory)
* Version and distribute your agent with [agent templates](/guides/templates/overview)
View File
@@ -81,7 +81,7 @@ agent_state = client.agents.create(
# the AgentState object contains all the information about the agent
print(agent_state)
```
```typescript maxLines=50 title="node.js"
```typescript TypeScript maxLines=50
// install letta-client with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
@@ -169,7 +169,7 @@ print(response.usage)
for message in response.messages:
    print(message)
```
```typescript maxLines=50 title="node.js"
```typescript TypeScript maxLines=50
// send a message to the agent
const response = await client.agents.messages.create(
agentState.id, {
View File
@@ -85,7 +85,7 @@ agent_state = client.agents.create(
# the AgentState object contains all the information about the agent
print(agent_state)
```
```typescript maxLines=50 title="node.js"
```typescript TypeScript maxLines=50
// install letta-client with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
@@ -167,7 +167,7 @@ print(response.usage)
for message in response.messages:
    print(message)
```
```typescript maxLines=50 title="node.js"
```typescript TypeScript maxLines=50
// send a message to the agent
const response = await client.agents.messages.create(
agentState.id, {
View File
@@ -36,11 +36,11 @@ slug: /
</div>
<h1 className="text-7xl mb-6 text-left">Build with Letta</h1>
<p className="text-3xl font-light mb-8 text-left">
Learn how to build and deploy stateful agents
A framework for building stateful AI agents with long-term memory
</p>
<div className="mt-10">
<a href="/overview" className="text-xl text-left font-normal hover:opacity-90 transition-opacity">
Get started
<div>
<a href="/quickstart" className="text-xl text-left font-normal hover:opacity-90 transition-opacity">
Start building
</a>
</div>
</div>
@@ -48,50 +48,71 @@ slug: /
{/* Main Content */}
<div className="landingbody max-w-4xl mx-auto px-8 py-12 mt-12">
<CardGroup cols={3}>
<Card
title="Letta Quickstart"
icon="fa-sharp fa-light fa-bolt"
href="/quickstart"
>
Create your first stateful agent in a few minutes
</Card>
<Card
title="Agent Development Environment"
icon="fa-sharp fa-light fa-browser"
href="/guides/ade/overview"
>
Learn how to use the Agent Development Environment (ADE)
</Card>
<Card
title="REST API and SDKs"
icon="fa-sharp fa-light fa-code"
href="/api-reference/overview"
>
Integrate Letta into your application with a few lines of code
</Card>
<Card
title="MCP Support"
icon="fa-brands fa-usb"
href="/guides/mcp/overview"
>
Connect Letta agents to tool libraries via Model Context Protocol (MCP)
</Card>
<Card
title="Cookbooks and Tutorials"
icon="fa-sharp fa-light fa-books"
href="/cookbooks"
>
Learn how to build with Letta using tutorials and pre-made apps
</Card>
<Card
title="DeepLearning.AI Course"
icon="fa-sharp fa-light fa-graduation-cap"
href="https://www.deeplearning.ai/short-courses/llms-as-operating-systems-agent-memory/?utm_campaign=memgpt-launch&utm_content=331638345&utm_medium=social&utm_source=docs&hss_channel=tw-992153930095251456"
>
Take our free DeepLearning.AI course on agent memory
</Card>
</CardGroup>
<div className="mb-8">
<h2 className="text-2xl font-semibold mb-6">Get Started</h2>
<CardGroup cols={1}>
<Card
title="Letta Quickstart"
icon="fa-sharp fa-light fa-bolt"
href="/quickstart"
>
Create your first stateful agent in a few minutes - the best place to start your Letta journey
</Card>
</CardGroup>
</div>
<div className="mb-8">
<h2 className="text-2xl font-semibold mb-6">Learn and Build</h2>
<CardGroup cols={2}>
<Card
title="Cookbooks and Tutorials"
icon="fa-sharp fa-light fa-books"
href="/cookbooks"
>
Learn how to build with Letta using tutorials and pre-made apps
</Card>
<Card
title="Agent Development Environment"
icon="fa-sharp fa-light fa-browser"
href="/guides/ade/overview"
>
Use the Agent Development Environment (ADE) to test and debug your agents
</Card>
</CardGroup>
</div>
<div className="mb-8">
<h2 className="text-2xl font-semibold mb-6">Integration</h2>
<CardGroup cols={2}>
<Card
title="REST API and SDKs"
icon="fa-sharp fa-light fa-code"
href="/api-reference/overview"
>
Integrate Letta into your application with a few lines of code
</Card>
<Card
title="MCP Support"
icon="fa-brands fa-usb"
href="/guides/mcp/overview"
>
Connect Letta agents to tool libraries via Model Context Protocol (MCP)
</Card>
</CardGroup>
</div>
<div className="mb-8">
<h2 className="text-2xl font-semibold mb-6">Deep Dive</h2>
<CardGroup cols={1}>
<Card
title="DeepLearning.AI Course"
icon="fa-sharp fa-light fa-graduation-cap"
href="https://www.deeplearning.ai/short-courses/llms-as-operating-systems-agent-memory/?utm_campaign=memgpt-launch&utm_content=331638345&utm_medium=social&utm_source=docs&hss_channel=tw-992153930095251456"
>
Take our free DeepLearning.AI course on agent memory
</Card>
</CardGroup>
</div>
</div>
</div>
View File
@@ -29,40 +29,7 @@ Letta recognizes templated variables in the custom header and auth token fields
- This is supported in the ADE as well when configuring API key/access tokens and custom headers.
<CodeGroup>
```python title="python" maxLines=50
from letta_client import Letta
from letta_client.types import StreamableHTTPServerConfig, MCPServerType
client = Letta(token="LETTA_API_KEY")
# Connect a Streamable HTTP server with Bearer token auth
streamable_config = StreamableHTTPServerConfig(
    server_name="my-server",
    type=MCPServerType.STREAMABLE_HTTP,
    server_url="https://mcp-server.example.com/mcp",
    auth_header="Authorization",
    auth_token="Bearer your-token",  # Include "Bearer " prefix
    custom_headers={"X-API-Version": "v1"}  # Additional custom headers
)
client.tools.add_mcp_server(request=streamable_config)

# Example with templated variables for agent-scoped authentication
agent_scoped_config = StreamableHTTPServerConfig(
    server_name="user-specific-server",
    type=MCPServerType.STREAMABLE_HTTP,
    server_url="https://api.example.com/mcp",
    auth_header="Authorization",
    auth_token="Bearer {{AGENT_API_KEY | api_key}}",  # Agent-specific API key
    custom_headers={
        "X-User-ID": "{{AGENT_API_KEY | user_id}}",  # Agent-specific user ID
        "X-API-Version": "v2"
    }
)
client.tools.add_mcp_server(request=agent_scoped_config)
```
```typescript title="node.js" maxLines=50
```typescript TypeScript maxLines=50
import { LettaClient, Letta } from '@letta-ai/letta-client';
const client = new LettaClient({ token: "LETTA_API_KEY" });
@@ -96,6 +63,39 @@ const agentScopedConfig: Letta.StreamableHttpServerConfig = {
await client.tools.addMcpServer(agentScopedConfig);
```
```python title="python" maxLines=50
from letta_client import Letta
from letta_client.types import StreamableHttpServerConfig, McpServerType
client = Letta(token="LETTA_API_KEY")
# Connect a Streamable HTTP server with Bearer token auth
streamable_config = StreamableHttpServerConfig(
    server_name="my-server",
    type=McpServerType.StreamableHttp,
    server_url="https://mcp-server.example.com/mcp",
    auth_header="Authorization",
    auth_token="Bearer your-token",  # Include "Bearer " prefix
    custom_headers={"X-API-Version": "v1"}  # Additional custom headers
)
client.tools.add_mcp_server(request=streamable_config)

# Example with templated variables for agent-scoped authentication
agent_scoped_config = StreamableHttpServerConfig(
    server_name="user-specific-server",
    type=McpServerType.StreamableHttp,
    server_url="https://api.example.com/mcp",
    auth_header="Authorization",
    auth_token="Bearer {{AGENT_API_KEY | api_key}}",  # Agent-specific API key
    custom_headers={
        "X-User-ID": "{{AGENT_API_KEY | user_id}}",  # Agent-specific user ID
        "X-API-Version": "v2"
    }
)
client.tools.add_mcp_server(request=agent_scoped_config)
```
</CodeGroup>
## SSE (Deprecated)
@@ -124,28 +124,7 @@ Letta recognizes templated variables in the custom header and auth token fields
- This is supported in the ADE as well when configuring API key/access tokens and custom headers.
<CodeGroup>
```python title="python" maxLines=50
from letta_client import Letta
from letta_client.types import SseServerConfig, MCPServerType
client = Letta(token="LETTA_API_KEY")
# Connect an SSE server (legacy)
sse_config = SseServerConfig(
    server_name="legacy-server",
    type=MCPServerType.SSE,
    server_url="https://legacy-mcp.example.com/sse",
    auth_header="Authorization",
    auth_token="Bearer optional-token",  # Include "Bearer " prefix
    custom_headers={
        "X-User-ID": "{{AGENT_API_KEY | user_id}}",  # Agent-specific user ID
        "X-API-Version": "v2"
    }
)
client.tools.add_mcp_server(request=sse_config)
```
```typescript title="node.js" maxLines=50
```typescript TypeScript maxLines=50
import { LettaClient, Letta } from '@letta-ai/letta-client';
const client = new LettaClient({ token: "LETTA_API_KEY" });
@@ -165,6 +144,27 @@ const sseConfig: Letta.SseServerConfig = {
await client.tools.addMcpServer(sseConfig);
```
```python title="python" maxLines=50
from letta_client import Letta
from letta_client.types import SseServerConfig, McpServerType
client = Letta(token="LETTA_API_KEY")
# Connect an SSE server (legacy)
sse_config = SseServerConfig(
    server_name="legacy-server",
    type=McpServerType.Sse,
    server_url="https://legacy-mcp.example.com/sse",
    auth_header="Authorization",
    auth_token="Bearer optional-token",  # Include "Bearer " prefix
    custom_headers={
        "X-User-ID": "{{AGENT_API_KEY | user_id}}",  # Agent-specific user ID
        "X-API-Version": "v2"
    }
)
client.tools.add_mcp_server(request=sse_config)
```
</CodeGroup>
@@ -173,6 +173,34 @@ await client.tools.addMcpServer(sseConfig);
**ADE**: Agent → Tools → Select MCP tools
<CodeGroup>
```typescript TypeScript maxLines=50
import { LettaClient } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
// List tools from an MCP server
const tools = await client.tools.listMcpToolsByServer("weather-server");
// Add a specific tool from the MCP server
const tool = await client.tools.addMcpTool("weather-server", "get_weather");
// Create agent with MCP tool
const agentState = await client.agents.create({
    model: "openai/gpt-4o-mini",
    embedding: "openai/text-embedding-3-small",
    toolIds: [tool.id]
});

// Use the agent with MCP tools
const response = await client.agents.messages.create(agentState.id, {
    messages: [
        {
            role: "user",
            content: "Use the weather tool to check the forecast"
        }
    ]
});
```
```python title="python" maxLines=50
from letta_client import Letta
@@ -211,32 +239,4 @@ response = client.agents.messages.create(
]
)
```
```typescript title="node.js" maxLines=50
import { LettaClient } from '@letta-ai/letta-client'
const client = new LettaClient({ token: "LETTA_API_KEY" });
// List tools from an MCP server
const tools = await client.tools.listMcpToolsByServer("weather-server");
// Add a specific tool from the MCP server
const tool = await client.tools.addMcpTool("weather-server", "get_weather");
// Create agent with MCP tool
const agentState = await client.agents.create({
    model: "openai/gpt-4o-mini",
    embedding: "openai/text-embedding-3-small",
    toolIds: [tool.id]
});

// Use the agent with MCP tools
const response = await client.agents.messages.create(agentState.id, {
    messages: [
        {
            role: "user",
            content: "Use the weather tool to check the forecast"
        }
    ]
});
```
</CodeGroup>
View File
@@ -16,6 +16,30 @@ Local (stdio) MCP servers can be useful for local development, testing, and situ
**ADE**: Tool Manager → Add MCP Server → stdio → specify command and args
<CodeGroup>
```typescript TypeScript maxLines=50
import { LettaClient } from '@letta-ai/letta-client'
// Self-hosted only
const client = new LettaClient({
    baseUrl: "http://localhost:8283"
});

// Connect a stdio server (npx example - works in Docker!)
const stdioConfig = {
    server_name: "github-server",
    command: "npx",
    args: ["-y", "@modelcontextprotocol/server-github"],
    env: {"GITHUB_PERSONAL_ACCESS_TOKEN": "your-token"}
};
await client.tools.addMcpServer(stdioConfig);
// List available tools
const tools = await client.tools.listMcpToolsByServer("github-server");
// Add a tool to use with agents
const tool = await client.tools.addMcpTool("github-server", "create_repository");
```
```python title="python" maxLines=50
from letta_client import Letta
from letta_client.types import StdioServerConfig
@@ -43,30 +67,6 @@ tool = client.tools.add_mcp_tool(
mcp_tool_name="create_repository"
)
```
```typescript title="node.js" maxLines=50
import { LettaClient } from '@letta-ai/letta-client'
// Self-hosted only
const client = new LettaClient({
    baseUrl: "http://localhost:8283"
});

// Connect a stdio server (npx example - works in Docker!)
const stdioConfig = {
    server_name: "github-server",
    command: "npx",
    args: ["-y", "@modelcontextprotocol/server-github"],
    env: {"GITHUB_PERSONAL_ACCESS_TOKEN": "your-token"}
};
await client.tools.addMcpServer(stdioConfig);
// List available tools
const tools = await client.tools.listMcpToolsByServer("github-server");
// Add a tool to use with agents
const tool = await client.tools.addMcpTool("github-server", "create_repository");
```
</CodeGroup>
## Docker Support
View File
@@ -105,6 +105,16 @@ docker run \
With password protection enabled, you will have to provide your password in the bearer token header in your API requests:
<CodeGroup>
```typescript TypeScript maxLines=50
// install letta-client with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
// create the client with the token set to your password
const client = new LettaClient({
    baseUrl: "http://localhost:8283",
    token: "yourpassword"
});
```
```python title="python" maxLines=50
# install letta_client with `pip install letta-client`
from letta_client import Letta
@@ -115,16 +125,6 @@ client = Letta(
token="yourpassword"
)
```
```typescript maxLines=50 title="node.js"
// install letta-client with `npm install @letta-ai/letta-client`
import { LettaClient } from '@letta-ai/letta-client'
// create the client with the token set to your password
const client = new LettaClient({
    baseUrl: "http://localhost:8283",
    token: "yourpassword"
});
```
```curl curl
curl --request POST \
--url http://localhost:8283/v1/agents/$AGENT_ID/messages \

Some files were not shown because too many files have changed in this diff Show More