chore: officially release 0.11.8 (#2794)

This commit is contained in:
Kian Jones
2025-09-09 11:32:54 -07:00
committed by GitHub
436 changed files with 59305 additions and 12269 deletions

115
.github/workflows/alembic-validation.yml vendored Normal file
View File

@@ -0,0 +1,115 @@
# Validates Alembic migrations against both SQLite and Postgres (pgvector)
# whenever migration files or this workflow change.
name: Alembic Migration Validation

on:
  pull_request:
    branches: [ main ]
  pull_request_target:
    branches: [ main ]
    types: [labeled]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  changed-files:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    runs-on: ubuntu-latest
    name: changed-files
    outputs:
      all_changed_files: ${{ steps.changed-files.outputs.all_changed_files }}
      any_changed: ${{ steps.changed-files.outputs.any_changed }}
    steps:
      - uses: actions/checkout@v4
        with:
          # On pull_request_target the default checkout is the BASE branch, which
          # would make the "safe to test" label validate the wrong migrations.
          # Check out the PR head instead (same convention as the ref passed to
          # reusable-test-workflow elsewhere in this repo).
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
          fetch-depth: 0
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        with:
          files: |
            alembic/**
            .github/workflows/alembic-validation.yml

  test-sqlite:
    needs: [ changed-files ]
    if: ${{ needs.changed-files.outputs.any_changed == 'true' }}
    runs-on: [self-hosted, medium]
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Test the PR's migrations, not the base branch (see note above in this file).
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
      - name: Install dependencies
        shell: bash
        working-directory: .
        # NOTE(review): `inputs` is empty outside reusable/dispatch workflows, so
        # the fallback after `||` is always taken here — confirm intentional.
        run: uv sync --no-install-project ${{ inputs.install-args || '--extra sqlite --extra external-tools --extra dev --extra cloud-tool-sandbox' }}
      - name: Test alembic migration
        working-directory: .
        run: |
          uv run alembic upgrade head
          # kinda janky but I think this might not matter for sqlite?
          # uv run alembic check
      - name: Cleanup persistent data
        if: ${{ always() }}
        working-directory: .
        run: |
          echo "Cleaning up persistent data..."
          sudo rm -rf ~/.letta || true

  test-postgres:
    needs: [ changed-files ]
    if: ${{ needs.changed-files.outputs.any_changed == 'true' }}
    runs-on: [self-hosted, medium]
    timeout-minutes: 15
    services:
      postgres:
        image: pgvector/pgvector:pg17
        ports:
          # Quoted to avoid YAML 1.1 sexagesimal parsing of colon-separated digits.
          - "5432:5432"
        env:
          POSTGRES_HOST_AUTH_METHOD: trust
          POSTGRES_DB: postgres
          POSTGRES_USER: postgres
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Test the PR's migrations, not the base branch (see note above in this file).
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
      - name: Install dependencies
        shell: bash
        working-directory: .
        run: uv sync --no-install-project ${{ inputs.install-args || '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox' }}
      - name: Test alembic migration
        working-directory: .
        env:
          LETTA_PG_PORT: 5432
          LETTA_PG_USER: postgres
          LETTA_PG_PASSWORD: postgres
          LETTA_PG_DB: postgres
          LETTA_PG_HOST: localhost
        run: |
          psql -h localhost -U postgres -d postgres -c 'CREATE EXTENSION IF NOT EXISTS vector;'
          uv run alembic upgrade head
          uv run alembic check
      - name: Print docker logs if tests fail
        if: ${{ failure() || cancelled() }}
        run: |
          echo "Printing Docker Logs..."
          docker logs $(docker ps -aq --filter "ancestor=pgvector/pgvector:pg17") || true
      - name: Cleanup containers and volumes
        if: ${{ always() }}
        run: |
          echo "Cleaning up containers and volumes..."
          docker stop $(docker ps -aq --filter "ancestor=pgvector/pgvector:pg17") || true
          docker rm $(docker ps -aq --filter "ancestor=pgvector/pgvector:pg17") || true
          docker volume prune -f || true
          docker system prune -f || true

View File

@@ -1,59 +0,0 @@
name: Code Style Checks
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
style-checks:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.11"] # Removed 3.12+ as minimal sets the standard. Adjust Python version matrix if needed
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ github.head_ref }} # Checkout the PR branch
fetch-depth: 0 # Fetch all history for all branches and tags
- name: Set up python
id: setup-python
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
activate-environment: true
- name: Install Dependencies
run: |
uv sync --extra dev --extra postgres --extra external-tools
- name: Validate PR Title
if: github.event_name == 'pull_request'
uses: amannn/action-semantic-pull-request@v5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Run Pyright
uses: jakebailey/pyright-action@v2
with:
python-version: ${{ matrix.python-version }}
level: "error"
continue-on-error: true
- name: Run isort
run: uv run isort --profile black --check-only --diff .
- name: Run Black
run: uv run black --check .
- name: Run Autoflake
run: uv run autoflake --remove-all-unused-imports --remove-unused-variables --in-place --recursive --ignore-init-module-imports .

View File

@@ -0,0 +1,51 @@
# Runs the core integration-test suites via the shared reusable test workflow.
name: 🐍🧪 [Core] Integration Tests

on:
  pull_request:
    branches:
      - main
  pull_request_target:
    branches:
      - main
    types: [labeled]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  integration-tests:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    with:
      test-type: 'integration'
      use-redis: true
      changed-files-pattern: |
        **
        .github/workflows/reusable-test-workflow.yml
        .github/workflows/core-integration-tests.yml
      install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox'
      timeout-minutes: 15
      # Always test the PR head (falls back to the pushed sha outside PRs).
      ref: ${{ github.event.pull_request.head.sha || github.sha }}
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "test_suite": [
              "integration_test_summarizer.py",
              "integration_test_async_tool_sandbox.py",
              "integration_test_sleeptime_agent.py",
              "integration_test_agent_tool_graph.py",
              "integration_test_composio.py",
              "integration_test_chat_completions.py",
              "integration_test_multi_agent.py",
              "integration_test_batch_api_cron_jobs.py",
              "integration_test_batch_sdk.py",
              "integration_test_builtin_tools.py",
              "integration_test_turbopuffer.py",
              "integration_test_human_in_the_loop.py"
            ]
          }
        }
    secrets: inherit

67
.github/workflows/core-lint.yml vendored Normal file
View File

@@ -0,0 +1,67 @@
# Lints core Python sources (Pyright advisory, Ruff check + format) when
# relevant files change.
name: 🐍🧹 [Core] Lint and Test

on:
  pull_request:
    branches: [ main ]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  changed-files:
    runs-on: ubuntu-latest
    name: changed-files
    outputs:
      all_changed_files: ${{ steps.changed-files.outputs.all_changed_files }}
      any_changed: ${{ steps.changed-files.outputs.any_changed }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        with:
          files: |
            letta/**
            tests/**
            *.py
            pyproject.toml
            .github/workflows/core-lint.yml

  main:
    needs: [ changed-files ]
    if: ${{ needs.changed-files.outputs.any_changed == 'true' }}
    runs-on: [self-hosted, medium]
    strategy:
      matrix:
        python-version: ["3.12"]  # Adjust Python version matrix if needed
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install dependencies
        shell: bash
        working-directory: .
        # NOTE(review): `inputs` is empty outside reusable/dispatch workflows, so
        # the fallback after `||` is always taken here — confirm intentional.
        run: uv sync --no-install-project ${{ inputs.install-args || '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox' }}
      - name: Validate PR Title
        if: github.event_name == 'pull_request'
        uses: amannn/action-semantic-pull-request@v5
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Run Pyright
        uses: jakebailey/pyright-action@v2
        with:
          python-version: ${{ matrix.python-version }}
          level: "error"
        # Pyright is advisory only; failures do not block the workflow.
        continue-on-error: true
      - name: Run Ruff Check
        working-directory: .
        run: uv run ruff check --config pyproject.toml --diff .
      - name: Run Ruff Format
        working-directory: .
        run: uv run ruff format --config pyproject.toml --check --diff .

View File

@@ -0,0 +1,60 @@
# Runs the core unit-test suites against SQLite via the shared reusable workflow.
name: 🐍👨‍🔬 [Core] Unit Tests (SQLite)

on:
  pull_request:
    branches:
      - main
  pull_request_target:
    branches:
      - main
    types: [labeled]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  unit-tests:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    with:
      test-type: 'sqlite'
      use-redis: true
      # NOTE(review): sibling core-unit-test.yml uses `**` here; `apps/core/**`
      # may never match in this repo layout — confirm the intended pattern.
      changed-files-pattern: |
        apps/core/**
        .github/workflows/reusable-test-workflow.yml
        .github/workflows/core-unit-sqlite-test.yml
      install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google --extra sqlite'
      timeout-minutes: 15
      ref: ${{ github.event.pull_request.head.sha || github.sha }}
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "include": [
              {"test_suite": "test_client.py"},
              {"test_suite": "test_sdk_client.py"},
              {"test_suite": "test_server.py"},
              {"test_suite": "test_tool_schema_parsing.py"},
              {"test_suite": "test_tool_rule_solver.py"},
              {"test_suite": "test_memory.py"},
              {"test_suite": "test_utils.py"},
              {"test_suite": "test_stream_buffer_readers.py"},
              {"test_suite": "test_agent_serialization.py"},
              {"test_suite": "test_optimistic_json_parser.py"},
              {"test_suite": "test_llm_clients.py"},
              {"test_suite": "test_letta_agent_batch.py"},
              {"test_suite": "test_providers.py"},
              {"test_suite": "test_sources.py"},
              {"test_suite": "test_managers.py"},
              {"test_suite": "sdk/"},
              {"test_suite": "mcp_tests/", "use_experimental": true},
              {"test_suite": "test_timezone_formatting.py"},
              {"test_suite": "test_plugins.py"},
              {"test_suite": "test_embeddings.py"}
            ]
          }
        }
    secrets: inherit

60
.github/workflows/core-unit-test.yml vendored Normal file
View File

@@ -0,0 +1,60 @@
# Runs the core unit-test suites (Postgres-backed) via the shared reusable workflow.
name: 🐍👨‍🔬 [Core] Unit Tests

on:
  pull_request:
    branches:
      - main
  pull_request_target:
    branches:
      - main
    types: [labeled]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  unit-tests:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    with:
      test-type: 'unit'
      use-redis: true
      changed-files-pattern: |
        **
        .github/workflows/reusable-test-workflow.yml
        .github/workflows/core-unit-test.yml
      install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google'
      timeout-minutes: 15
      ref: ${{ github.event.pull_request.head.sha || github.sha }}
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "include": [
              {"test_suite": "test_client.py"},
              {"test_suite": "test_sdk_client.py"},
              {"test_suite": "test_server.py"},
              {"test_suite": "test_managers.py"},
              {"test_suite": "test_tool_schema_parsing.py"},
              {"test_suite": "test_tool_rule_solver.py"},
              {"test_suite": "test_memory.py"},
              {"test_suite": "test_utils.py"},
              {"test_suite": "test_stream_buffer_readers.py"},
              {"test_suite": "test_agent_serialization.py"},
              {"test_suite": "test_agent_serialization_v2.py"},
              {"test_suite": "test_optimistic_json_parser.py"},
              {"test_suite": "test_llm_clients.py"},
              {"test_suite": "test_letta_agent_batch.py"},
              {"test_suite": "test_providers.py"},
              {"test_suite": "test_sources.py"},
              {"test_suite": "sdk/"},
              {"test_suite": "mcp_tests/", "use_experimental": true},
              {"test_suite": "test_timezone_formatting.py"},
              {"test_suite": "test_plugins.py"},
              {"test_suite": "test_embeddings.py"}
            ]
          }
        }
    secrets: inherit

20
.github/workflows/fern-check.yml vendored Normal file
View File

@@ -0,0 +1,20 @@
# Validates the Fern API definition on every PR to main.
name: 🌿 Fern Check

on:
  pull_request:
    branches: [ main ]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  run:
    runs-on: [self-hosted, small]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Check API is valid
        working-directory: fern
        # NOTE(review): assumes the `fern` CLI is preinstalled on the self-hosted
        # runner (release workflows install it via `npm install -g fern-api`).
        run: fern check

37
.github/workflows/fern-docs-preview.yml vendored Normal file
View File

@@ -0,0 +1,37 @@
# Generates a Fern docs preview for PRs touching fern/ and comments the URL.
name: Preview Docs

on:
  pull_request:
    paths:
      - 'fern/**'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  run:
    runs-on: [self-hosted, small]
    # Least-privilege replacement for `permissions: write-all`: only the PR
    # comment step needs write access; checkout needs read.
    permissions:
      contents: read
      pull-requests: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: true
      - name: Generate preview URL
        id: generate-docs
        working-directory: fern
        env:
          FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
        run: |
          OUTPUT=$(fern generate --docs --preview 2>&1) || true
          echo "$OUTPUT"
          URL=$(echo "$OUTPUT" | grep -oP 'Published docs to \K.*(?= \()')
          echo "Preview URL: $URL"
          echo "🌿 Preview your docs: $URL" > preview_url.txt
      - name: Comment URL in PR
        uses: thollander/actions-comment-pull-request@v3
        with:
          file-path: fern/preview_url.txt

21
.github/workflows/fern-docs-publish.yml vendored Normal file
View File

@@ -0,0 +1,21 @@
# Publishes the Fern docs site on every push to main.
name: 🌿 Publish Docs

on:
  push:
    branches: [ main ]

jobs:
  run:
    runs-on: [self-hosted, medium]
    # NOTE(review): `run_number > 1` presumably skips the very first run of this
    # workflow — confirm this guard is still needed.
    if: ${{ github.event_name == 'push' && contains(github.ref, 'refs/heads/main') && github.run_number > 1 }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: true
      - name: Publish Docs
        working-directory: .
        env:
          FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
        run: fern generate --docs --log-level debug

View File

@@ -0,0 +1,168 @@
# Builds and smoke-tests a preview of the Fern-generated Python SDK whenever the
# OpenAPI spec changes; the built SDK is cached for the release workflow.
name: 🌿 Preview Python SDK

on:
  pull_request:
    branches:
      - main
  pull_request_target:
    branches:
      - main
    types: [labeled]
  push:
    branches:
      - main
    paths:
      - 'fern/openapi.json'
      - 'fern/openapi-overrides.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  changed-files:
    # Run on push to main, on pull_request, OR on pull_request_target only when
    # labeled "safe to test". (Without the push clause, the push trigger declared
    # above could never run any jobs, and the downstream release workflow that
    # listens for successful push runs would have nothing to release.)
    if: github.event_name == 'push' || github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    runs-on: [self-hosted, small]
    name: changed-files
    outputs:
      all_changed_files: ${{ steps.changed-files.outputs.all_changed_files }}
      any_changed: ${{ steps.changed-files.outputs.any_changed }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
          fetch-depth: 0
          # On pull_request_target the default checkout is the BASE branch;
          # use the PR head so the PR's spec changes are detected.
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        with:
          files: |
            fern/openapi.json
            fern/openapi-overrides.yml

  preview-python-sdk:
    needs: [changed-files]
    if: ${{ needs.changed-files.outputs.any_changed == 'true' }}
    name: preview-python-sdk
    runs-on: [self-hosted, medium]
    outputs:
      cache-key: ${{ steps.cache-key.outputs.key }}
    services:
      postgres:
        image: pgvector/pgvector:pg17
        env:
          POSTGRES_HOST_AUTH_METHOD: trust
          POSTGRES_DB: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_USER: postgres
        ports:
          # Quoted to avoid YAML 1.1 sexagesimal parsing of colon-separated digits.
          - "5432:5432"
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          submodules: true
          # Build the PR's spec, not the base branch (see note above in this file).
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
      - name: Generate cache key
        id: cache-key
        run: |
          echo "key=sdk-${{ github.ref_name }}-${{ hashFiles('fern/*', 'pyproject.toml') }}" >> $GITHUB_OUTPUT
      - name: Try to restore SDK cache
        id: restore-cache
        uses: actions/cache/restore@v4
        with:
          path: |
            fern/.preview/fern-python-sdk/
          key: ${{ steps.cache-key.outputs.key }}
      - name: Inject env vars into environment
        working-directory: .
        # Masks each secret value before exporting it to the job environment.
        run: |
          while IFS= read -r line || [[ -n "$line" ]]; do
            if [[ -n "$line" ]]; then
              value=$(echo "$line" | cut -d= -f2-)
              echo "::add-mask::$value"
              echo "$line" >> $GITHUB_ENV
            fi
          done < <(letta_secrets_helper --env dev --service ci)
      - name: Debug environment
        shell: bash
        run: |
          echo "=== Environment Debug ==="
          echo "PATH: $PATH"
          echo "USER: $(whoami)"
          echo "HOME: $HOME"
          echo "Shell: $SHELL"
          echo "Working directory: $(pwd)"
          echo ""
          echo "=== UV Debug ==="
          which uv || echo "uv not found in PATH"
          ls -la /usr/local/bin/uv || echo "/usr/local/bin/uv not found"
          ls -la /home/ci-runner/.local/bin/uv || echo "ci-runner uv not found"
          echo ""
          echo "=== Test uv command ==="
          uv --version || echo "uv --version failed"
      - name: Install dependencies
        shell: bash
        working-directory: .
        run: uv sync --no-install-project ${{ inputs.install-args || '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox' }}
      - name: Migrate database
        working-directory: .
        env:
          LETTA_PG_PORT: 5432
          LETTA_PG_USER: postgres
          LETTA_PG_PASSWORD: postgres
          LETTA_PG_DB: postgres
          LETTA_PG_HOST: localhost
        run: |
          psql -h localhost -U postgres -d postgres -c 'CREATE EXTENSION IF NOT EXISTS vector'
          uv run alembic upgrade head
      - name: Run letta server
        working-directory: .
        env:
          LETTA_PG_DB: postgres
          LETTA_PG_USER: postgres
          LETTA_PG_PASSWORD: postgres
          LETTA_PG_HOST: localhost
          LETTA_PG_PORT: 5432
          OPENAI_API_KEY: ${{ env.OPENAI_API_KEY }}
          E2B_SANDBOX_TEMPLATE_ID: ${{ env.E2B_SANDBOX_TEMPLATE_ID }}
        run: |
          # Run server in background
          uv run letta server &
          # Wait for server to be ready
          timeout 60 bash -c 'until curl -s http://localhost:8283/health; do sleep 1; done'
      - name: Generate Python SDK Preview
        if: steps.restore-cache.outputs.cache-hit != 'true'
        working-directory: .
        env:
          FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
        run: |
          fern generate --group python-sdk --preview
          cd fern/.preview/fern-python-sdk
          poetry install
          poetry build --format wheel
          poetry run mypy .
          poetry run pytest -rP tests/custom/test_client.py --env localhost
          ls -lah
      - name: Save SDK to cache
        if: steps.restore-cache.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          path: |
            fern/.preview/fern-python-sdk/
          key: ${{ steps.cache-key.outputs.key }}

View File

@@ -0,0 +1,50 @@
# Releases the Python SDK: manually via workflow_dispatch (explicit version) or
# automatically after a successful push-triggered Preview Python SDK run.
name: 🌿 Release Python SDK

on:
  workflow_dispatch:
    inputs:
      version:
        description: "The version of the Python SDK that you would like to release"
        required: true
        type: string
  workflow_run:
    workflows: ["🌿 Preview Python SDK"]
    types:
      - completed
    branches:
      - main

jobs:
  release:
    # Manual dispatch always releases; workflow_run releases only for successful
    # push-triggered preview runs (PR-triggered previews are ignored).
    if: |
      github.event_name == 'workflow_dispatch' ||
      (github.event_name == 'workflow_run' &&
       github.event.workflow_run.event == 'push' &&
       github.event.workflow_run.conclusion == 'success')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          submodules: true
      - name: Download Fern
        run: npm install -g fern-api
      - name: Generate Python SDK
        working-directory: .
        env:
          FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
          PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
        run: |
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            fern generate --group python-sdk --version ${{ inputs.version }} --log-level debug
          else
            fern generate --group python-sdk --log-level debug
          fi
      - name: Publish Docs
        working-directory: .
        env:
          FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
        run: fern generate --docs

View File

@@ -0,0 +1,117 @@
# Builds and smoke-tests a preview of the Fern-generated TypeScript SDK whenever
# the OpenAPI spec changes.
name: 🌿 Preview TypeScript SDK

on:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main
    paths:
      - 'fern/openapi.json'
      - 'fern/openapi-overrides.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  changed-files:
    runs-on: [self-hosted, small]
    name: changed-files
    outputs:
      all_changed_files: ${{ steps.changed-files.outputs.all_changed_files }}
      any_changed: ${{ steps.changed-files.outputs.any_changed }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
          fetch-depth: 0
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        with:
          files: |
            fern/openapi.json
            fern/openapi-overrides.yml

  preview-typescript-sdk:
    needs: [changed-files]
    if: ${{ needs.changed-files.outputs.any_changed == 'true' }}
    runs-on: [self-hosted, medium]
    services:
      postgres:
        image: pgvector/pgvector:pg17
        env:
          POSTGRES_HOST_AUTH_METHOD: trust
          POSTGRES_DB: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_USER: postgres
        ports:
          # Quoted to avoid YAML 1.1 sexagesimal parsing of colon-separated digits.
          - "5432:5432"
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - name: Checkout repo
        # v4 for consistency with every other workflow (was the deprecated v3).
        uses: actions/checkout@v4
        with:
          submodules: true
      - name: Install dependencies
        shell: bash
        working-directory: .
        run: uv sync --no-install-project ${{ inputs.install-args || '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox' }}
      - name: Inject env vars into environment
        working-directory: .
        # Masks each secret value before exporting it to the job environment.
        run: |
          while IFS= read -r line || [[ -n "$line" ]]; do
            if [[ -n "$line" ]]; then
              value=$(echo "$line" | cut -d= -f2-)
              echo "::add-mask::$value"
              echo "$line" >> $GITHUB_ENV
            fi
          done < <(letta_secrets_helper --env dev --service ci)
      - name: Migrate database
        working-directory: .
        env:
          LETTA_PG_PORT: 5432
          LETTA_PG_USER: postgres
          LETTA_PG_PASSWORD: postgres
          LETTA_PG_DB: postgres
          LETTA_PG_HOST: localhost
        run: |
          psql -h localhost -U postgres -d postgres -c 'CREATE EXTENSION IF NOT EXISTS vector'
          uv run alembic upgrade head
      - name: Run letta server
        working-directory: .
        env:
          LETTA_PG_DB: postgres
          LETTA_PG_USER: postgres
          LETTA_PG_PASSWORD: postgres
          LETTA_PG_HOST: localhost
          LETTA_PG_PORT: 5432
          OPENAI_API_KEY: ${{ env.OPENAI_API_KEY }}
          E2B_SANDBOX_TEMPLATE_ID: ${{ env.E2B_SANDBOX_TEMPLATE_ID }}
        run: |
          # Run server in background
          uv run letta server &
          # Wait for server to be ready
          timeout 60 bash -c 'until curl -s http://localhost:8283/health; do sleep 1; done'
      - name: Generate TypeScript SDK Preview
        working-directory: .
        env:
          LETTA_ENV: localhost
          FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
        run: |
          fern generate --group ts-sdk --preview
          cd fern/.preview/fern-typescript-node-sdk
          yarn install
          yarn build
          yarn test tests/custom.test.ts

View File

@@ -0,0 +1,50 @@
# Releases the TypeScript SDK: manually via workflow_dispatch (explicit version)
# or automatically after a successful push-triggered Preview TypeScript SDK run.
name: 🌿 Release TypeScript SDK

on:
  workflow_dispatch:
    inputs:
      version:
        description: "The version of the TypeScript SDK that you would like to release"
        required: true
        type: string
  workflow_run:
    workflows: ["🌿 Preview TypeScript SDK"]
    types:
      - completed
    branches:
      - main

jobs:
  release:
    # Manual dispatch always releases; workflow_run releases only for successful
    # push-triggered preview runs (PR-triggered previews are ignored).
    if: |
      github.event_name == 'workflow_dispatch' ||
      (github.event_name == 'workflow_run' &&
       github.event.workflow_run.event == 'push' &&
       github.event.workflow_run.conclusion == 'success')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          submodules: true
      - name: Download Fern
        run: npm install -g fern-api
      - name: Generate TypeScript SDK
        working-directory: .
        env:
          FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
          NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
        run: |
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            fern generate --group ts-sdk --version ${{ inputs.version }} --log-level debug
          else
            fern generate --group ts-sdk --log-level debug
          fi
      - name: Publish Docs
        working-directory: .
        env:
          FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
        run: fern generate --docs

161
.github/workflows/lint-command.yml vendored Normal file
View File

@@ -0,0 +1,161 @@
# Applies auto-formatting to a PR branch when a collaborator comments "/lint"
# (or via manual dispatch with a PR number), then reports the outcome.
name: Lint Command

on:
  issue_comment:
    types: [created]
  workflow_dispatch:
    inputs:
      pr_number:
        description: 'PR number to run lint on'
        required: true

permissions:
  contents: write
  pull-requests: write
  issues: write

jobs:
  lint-command:
    name: Handle /lint command
    runs-on: ubuntu-latest
    # startsWith implies contains, so the original redundant
    # `contains(..., '/lint') && startsWith(..., '/lint')` is simplified.
    if: |
      (github.event_name == 'workflow_dispatch' && github.event.inputs.pr_number) ||
      (github.event_name == 'issue_comment' &&
       github.event.issue.pull_request &&
       startsWith(github.event.comment.body, '/lint'))
    steps:
      - name: Add acknowledgment reaction
        if: github.event_name == 'issue_comment'
        uses: peter-evans/create-or-update-comment@v4
        with:
          comment-id: ${{ github.event.comment.id }}
          reactions: eyes
      - name: Check permissions
        if: github.event_name == 'issue_comment'
        uses: actions/github-script@v7
        with:
          script: |
            const { data: collaborator } = await github.rest.repos.getCollaboratorPermissionLevel({
              owner: context.repo.owner,
              repo: context.repo.repo,
              username: context.actor
            });
            if (!['admin', 'write'].includes(collaborator.permission)) {
              github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: '❌ You need write permissions to run lint commands.'
              });
              core.setFailed('Insufficient permissions');
            }
      - name: Get PR information
        id: pr
        uses: actions/github-script@v7
        with:
          script: |
            const pr_number = context.eventName === 'issue_comment'
              ? context.issue.number
              : ${{ github.event.inputs.pr_number || 'null' }};
            const { data: pr } = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: pr_number
            });
            core.setOutput('branch', pr.head.ref);
            core.setOutput('repo', pr.head.repo.full_name);
            core.setOutput('sha', pr.head.sha);
            core.setOutput('number', pr_number);
      - name: Checkout PR branch
        uses: actions/checkout@v4
        with:
          ref: ${{ steps.pr.outputs.branch }}
          token: ${{ secrets.GITHUB_TOKEN }}
          fetch-depth: 0
      - name: Set up python 3.12
        id: setup-python
        uses: actions/setup-python@v5
        with:
          python-version: 3.12
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          enable-cache: false
          activate-environment: true
      - name: Install dependencies
        run: uv sync --extra dev --extra postgres --extra external-tools
        working-directory: .
      # - name: Run ruff check with fixes
      #   run: uv run ruff check --fix .
      #
      # - name: Run ruff format
      #   run: uv run ruff format .
      - name: Run isort, black, autoflake
        run: uv run isort . --profile black && uv run black . && uv run autoflake --remove-all-unused-imports --remove-unused-variables --in-place --recursive --ignore-init-module-imports .
        working-directory: .
      - name: Check for changes
        id: changes
        run: |
          if [[ -n $(git status --porcelain) ]]; then
            echo "changes=true" >> $GITHUB_OUTPUT
          else
            echo "changes=false" >> $GITHUB_OUTPUT
          fi
      - name: Commit and push changes
        if: steps.changes.outputs.changes == 'true'
        run: |
          git config --global user.name "github-actions[bot]"
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
          git add .
          # Two -m flags keep the block-scalar indentation out of the message body.
          git commit -m "style: lint / fmt" -m "Triggered by /lint command from @${{ github.actor }}"
          git push
      - name: Comment on success
        if: steps.changes.outputs.changes == 'true'
        uses: peter-evans/create-or-update-comment@v4
        with:
          issue-number: ${{ steps.pr.outputs.number }}
          body: |
            ✅ **Lint fixes applied successfully!**
            Ruff has automatically fixed linting issues and formatted the code.
            Changes have been committed to the PR branch.
      - name: Comment on no changes
        if: steps.changes.outputs.changes == 'false'
        uses: peter-evans/create-or-update-comment@v4
        with:
          issue-number: ${{ steps.pr.outputs.number }}
          body: |
            ✅ **No lint issues found!**
            The code is already properly formatted and passes all linting checks.
      - name: Comment on failure
        if: failure()
        uses: peter-evans/create-or-update-comment@v4
        with:
          issue-number: ${{ steps.pr.outputs.number }}
          body: |
            ❌ **Lint command failed!**
            There was an error while running the lint fixes. Please check the [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.

View File

@@ -0,0 +1,460 @@
name: Reusable Test Workflow

# Parameterized test workflow invoked via workflow_call by the per-suite
# workflows (unit, integration, docker, send-message, sqlite).
on:
  workflow_call:
    inputs:
      test-type:
        description: 'Type of tests to run (unit, integration, docker, send-message, sqlite)'
        required: true
        type: string
      core-directory:
        description: 'Working directory for commands. Uses . (root) by default.'
        required: false
        type: string
        default: '.'
      install-args:
        description: 'uv sync arguments'
        required: true
        type: string
      test-command:
        description: 'Command to run tests'
        required: false
        type: string
        default: 'uv run --frozen pytest -svv'
      test-path-prefix:
        description: 'Prefix for test path (e.g., tests/)'
        required: false
        type: string
        default: 'tests/'
      timeout-minutes:
        description: 'Timeout in minutes'
        required: false
        type: number
        default: 15
      runner:
        description: 'Runner to use'
        required: false
        type: string
        # JSON-encoded label list, consumed through fromJSON() in runs-on.
        default: '["self-hosted", "small"]'
      matrix-strategy:
        description: 'JSON string for matrix strategy'
        required: false
        type: string
        default: '{}'
      changed-files-pattern:
        description: 'Pattern for changed files detection'
        required: false
        type: string
        default: |
          **
          .github/workflows/reusable-test-workflow.yml
      skip-fern-generation:
        description: 'Skip Fern SDK generation'
        required: false
        type: boolean
        default: false
      use-docker:
        description: 'Use Docker for tests'
        required: false
        type: boolean
        default: false
      ref:
        description: 'Git ref to wait for checks on'
        required: false
        type: string
        # NOTE(review): expression default — confirm this resolves to the
        # intended commit for pull_request-triggered callers (callers pass
        # head.sha explicitly; see send-message workflow).
        default: ${{ github.sha }}
      use-redis:
        description: 'Use Redis for tests'
        required: false
        type: boolean
        default: false
jobs:
  # Detect which files changed so downstream jobs can be skipped when
  # nothing relevant was touched.
  changed-files:
    runs-on: ${{ fromJSON(inputs.runner) }}
    name: changed-files
    outputs:
      all_changed_files: ${{ steps.changed-files.outputs.all_changed_files }}
      any_changed: ${{ steps.changed-files.outputs.any_changed }}
    steps:
      - uses: actions/checkout@v4
        with:
          # full history required for changed-files diffing
          fetch-depth: 0
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v46.0.4
        with:
          files: ${{ inputs.changed-files-pattern }}

  # Probe whether a prebuilt Fern SDK preview already exists in the
  # actions cache; downstream jobs use cache_hit to skip the wait.
  cache-check:
    needs: [changed-files]
    runs-on: ${{ fromJSON(inputs.runner) }}
    name: Check cache key
    outputs:
      cache_key: ${{ steps.cache-key.outputs.key }}
      cache_hit: ${{ steps.cache.outputs.cache-hit }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Generate cache key
        # NOTE(review): with `||` this step runs whenever skip-fern-generation
        # is false, even if openapi files changed — that contradicts the
        # inline comment below ("skip if ... or if ..."); confirm intent
        # before changing (same expression is repeated in test-run).
        if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi-overrides.yml'))
        id: cache-key
        run: |
          echo "key=sdk-${{ github.ref_name }}-${{ hashFiles('fern/*', 'pyproject.toml') }}" >> $GITHUB_OUTPUT
      - name: Restore SDK cache
        # skip if "skip-fern-generation" is true or if the upstream workflow would've generated an sdk preview (changes to openapi files)
        if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi-overrides.yml'))
        id: cache
        uses: actions/cache/restore@v4
        with:
          path: |
            fern/.preview/fern-python-sdk/
          key: ${{ steps.cache-key.outputs.key }}
          fail-on-cache-miss: false
  # Gate the test job on the upstream "preview-python-sdk" check so tests
  # don't run against a stale or missing SDK preview. Only runs when the
  # SDK cache probe missed.
  block-until-sdk-preview-finishes:
    needs: [changed-files, cache-check]
    if: |
      needs.cache-check.outputs.cache_hit != 'true'
    timeout-minutes: ${{ inputs.timeout-minutes }}
    runs-on: ${{ fromJSON(inputs.runner) }}
    name: block-until-sdk-preview-finishes
    steps:
      - name: Debug ref information
        run: |
          echo "Input ref: ${{ inputs.ref }}"
          echo "GitHub SHA: ${{ github.sha }}"
          echo "GitHub ref: ${{ github.ref }}"
          echo "PR head SHA: ${{ github.event.pull_request.head.sha }}"
          echo "Event name: ${{ github.event_name }}"
      - name: Wait for Preview SDK workflow
        if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi-overrides.yml'))
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "Waiting for 'preview-python-sdk' check to complete on ref: ${{ inputs.ref }}"
          # Wait for the check to complete with timeout
          timeout_seconds=1800
          interval_seconds=60
          elapsed=0
          while [ $elapsed -lt $timeout_seconds ]; do
            echo "Checking status... (elapsed: ${elapsed}s)"
            # Get check runs using pr checks syntax with branch name or PR number
            if [ "${{ github.event_name }}" = "pull_request" ]; then
              pr_identifier="${{ github.event.pull_request.number }}"
            else
              pr_identifier="${{ github.ref_name }}"
            fi
            # Sort by startedAt (desc) and take the state of the most recent
            # "preview-python-sdk" check run.
            check_info=$(gh pr checks "$pr_identifier" -R ${{ github.repository }} --json name,state,startedAt \
              | jq -r '.[] | select(.name == "preview-python-sdk") | [.startedAt, .state] | @tsv' | sort -r | head -1 | cut -f2)
            if [ -n "$check_info" ]; then
              echo "Check state: $check_info"
              if [ "$check_info" = "SUCCESS" ] || [ "$check_info" = "SKIPPED" ]; then
                echo "Check completed with state: $check_info"
                exit 0
              elif [ "$check_info" = "FAILURE" ] || [ "$check_info" = "CANCELLED" ]; then
                echo "❌ Preview Python SDK build failed with state: $check_info"
                echo "🚫 Blocking dependent test jobs to prevent extraneous failures"
                echo "📋 To fix: Check the 'preview-python-sdk' job logs for build errors"
                exit 1
              fi
            else
              echo "Check 'preview-python-sdk' not found yet"
            fi
            sleep $interval_seconds
            elapsed=$((elapsed + interval_seconds))
          done
          echo "Timeout waiting for check to complete"
          exit 1
  # The actual test job; matrix, services and commands are all driven by
  # the workflow_call inputs.
  test-run:
    needs: [changed-files, block-until-sdk-preview-finishes]
    # always() lets this run when the blocking job was skipped (cache hit),
    # but only when relevant files changed and the gate didn't fail.
    if: |
      always() &&
      needs.changed-files.outputs.any_changed == 'true' &&
      (needs.block-until-sdk-preview-finishes.result == 'success' ||
       needs.block-until-sdk-preview-finishes.result == 'skipped')
    runs-on: ${{ fromJSON(inputs.runner) }}
    timeout-minutes: ${{ inputs.timeout-minutes }}
    strategy: ${{ fromJSON(inputs.matrix-strategy) }}
    services:
      postgres:
        image: pgvector/pgvector:pg17
        ports:
          # avoids conflict with docker postgres
          - ${{ inputs.use-docker && '9999:5432' || '5432:5432' }}
        env:
          POSTGRES_HOST_AUTH_METHOD: trust
          POSTGRES_DB: postgres
          POSTGRES_USER: postgres
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
      redis:
        # NOTE(review): image is empty when use-redis is false — presumably
        # the runner then skips this service; confirm that behavior.
        image: ${{ inputs.use-redis && 'redis:8-alpine' || '' }}
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          enable-cache: true
      # Record the working directory for later steps as a step output.
      - name: Set core directory
        id: detect-core-dir
        run: |
          echo "dir=${{ inputs.core-directory }}" >> $GITHUB_OUTPUT
          echo "detected=manual" >> $GITHUB_OUTPUT
          echo "Using core directory: $(cat $GITHUB_OUTPUT | grep '^dir=' | cut -d'=' -f2)"
      # Same cache-key derivation as the cache-check job; keys must match.
      - name: Generate cache key
        if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi-overrides.yml'))
        id: cache-key
        run: |
          echo "key=sdk-${{ github.ref_name }}-${{ hashFiles('fern/*', 'pyproject.toml') }}" >> $GITHUB_OUTPUT
      - name: Restore SDK cache
        # skip if "skip-fern-generation" is true or if the upstream workflow would've generated an sdk preview (changes to openapi files)
        if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi-overrides.yml'))
        id: restore-sdk-cache
        uses: actions/cache/restore@v4
        with:
          path: |
            fern/.preview/fern-python-sdk/
          key: ${{ steps.cache-key.outputs.key }}
          fail-on-cache-miss: false
      # Fail fast with a helpful message when the SDK preview cache is gone.
      - name: Check SDK cache availability
        if: (inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'fern/openapi-overrides.yml'))) && steps.restore-sdk-cache.outputs.cache-hit != 'true'
        run: |
          echo "❌ Preview Python SDK cache expired or missing!"
          echo "📦 Cache key: ${{ steps.cache-key.outputs.key }}"
          echo "🔄 To fix: Re-run the 'preview-python-sdk' workflow job to regenerate the SDK"
          echo "💡 This can happen when:"
          echo "  - The cache entry has expired"
          echo "  - Dependencies in fern/* or pyproject.toml have changed"
          echo "  - The preview-python-sdk job hasn't run successfully for this branch/commit"
          exit 1
      # NOTE(review): step name says "with retry" but no retry logic is
      # present in the command below.
      - name: Install dependencies with retry
        shell: bash
        working-directory: .
        run: |
          uv sync --no-install-project ${{ inputs.install-args }}
      # Rewrite the generated SDK's pyproject so uv accepts it (the poetry
      # version field is moved to line 2), then install it editable.
      - name: Install custom SDK
        if: inputs.skip-fern-generation != true
        working-directory: .
        run: |
          echo "Fixing Fern SDK pyproject.toml for uv compatibility..."
          SDK_PYPROJECT="fern/.preview/fern-python-sdk/pyproject.toml"
          VERSION=$(grep -A 10 '^\[tool\.poetry\]' "$SDK_PYPROJECT" | grep '^version' | head -1 | cut -d'"' -f2)
          head -n 2 < fern/.preview/fern-python-sdk/pyproject.toml > fern/.preview/fern-python-sdk/pyproject.toml.tmp
          echo "version = \"$VERSION\"" >> fern/.preview/fern-python-sdk/pyproject.toml.tmp
          tail -n +3 fern/.preview/fern-python-sdk/pyproject.toml >> fern/.preview/fern-python-sdk/pyproject.toml.tmp
          mv fern/.preview/fern-python-sdk/pyproject.toml.tmp fern/.preview/fern-python-sdk/pyproject.toml
          uv pip install -e fern/.preview/fern-python-sdk/.
      - name: Migrate database
        if: inputs.use-docker != true && inputs.test-type != 'sqlite'
        working-directory: .
        env:
          LETTA_PG_PORT: 5432
          LETTA_PG_USER: postgres
          LETTA_PG_PASSWORD: postgres
          LETTA_PG_DB: postgres
          LETTA_PG_HOST: localhost
        run: |
          psql -h localhost -U postgres -d postgres -c 'CREATE EXTENSION vector'
          uv run alembic upgrade head
      # Pull KEY=VALUE pairs from the secrets helper, masking each value
      # before exporting it to subsequent steps.
      - name: Inject env vars into environment
        working-directory: .
        run: |
          # Get secrets and mask them before adding to environment
          while IFS= read -r line || [[ -n "$line" ]]; do
            if [[ -n "$line" ]]; then
              value=$(echo "$line" | cut -d= -f2-)
              echo "::add-mask::$value"
              echo "$line" >> $GITHUB_ENV
            fi
          done < <(letta_secrets_helper --env dev --service ci)
      - name: Docker setup for Docker tests
        if: inputs.use-docker
        run: |
          mkdir -p /home/ci-runner/.letta/logs
          sudo chown -R $USER:$USER /home/ci-runner/.letta/logs
          chmod -R 755 /home/ci-runner/.letta/logs
      - name: Build and run docker dev server
        if: inputs.use-docker
        env:
          LETTA_PG_DB: letta
          LETTA_PG_USER: letta
          LETTA_PG_PASSWORD: letta
          LETTA_PG_PORT: 5432
          OPENAI_API_KEY: ${{ env.OPENAI_API_KEY }}
        run: |
          cd libs/config-core-deploy
          docker compose -f compose.yaml up --build -d
      - name: Wait for Docker service
        if: inputs.use-docker
        working-directory: ${{ steps.detect-core-dir.outputs.dir }}
        run: |
          bash scripts/wait_for_service.sh localhost:8083 -- echo "Service is ready"
      # Single dispatch step: exports provider-specific env vars based on
      # test-type/runner, then runs the suite-specific pytest invocation.
      - name: Run tests
        working-directory: ${{ steps.detect-core-dir.outputs.dir }}
        env:
          # Database configuration (shared, but values depend on Docker usage)
          LETTA_PG_PORT: 5432
          LETTA_PG_USER: ${{ inputs.use-docker && 'letta' || 'postgres' }}
          LETTA_PG_PASSWORD: ${{ inputs.use-docker && 'letta' || 'postgres' }}
          LETTA_PG_DB: ${{ inputs.use-docker && 'letta' || 'postgres' }}
          LETTA_PG_HOST: localhost
          # Server configuration (conditional)
          LETTA_SERVER_PASS: test_server_token
          # LLM Provider API Keys (shared across all test types)
          OPENAI_API_KEY: ${{ env.OPENAI_API_KEY }}
          ANTHROPIC_API_KEY: ${{ env.ANTHROPIC_API_KEY }}
          GEMINI_API_KEY: ${{ env.GEMINI_API_KEY }}
          GROQ_API_KEY: ${{ env.GROQ_API_KEY }}
          AZURE_API_KEY: ${{ env.AZURE_API_KEY }}
          AZURE_BASE_URL: ${{ secrets.AZURE_BASE_URL }}
          DEEPSEEK_API_KEY: ${{ env.DEEPSEEK_API_KEY }}
          LETTA_MISTRAL_API_KEY: ${{ secrets.LETTA_MISTRAL_API_KEY }}
          # External service API Keys (shared across all test types)
          COMPOSIO_API_KEY: ${{ env.COMPOSIO_API_KEY }}
          E2B_API_KEY: ${{ env.E2B_API_KEY }}
          E2B_SANDBOX_TEMPLATE_ID: ${{ env.E2B_SANDBOX_TEMPLATE_ID }}
          # Turbopuffer flags
          LETTA_USE_TPUF: true
          LETTA_TPUF_API_KEY: ${{ env.LETTA_TPUF_API_KEY }}
          # Encryption key
          LETTA_ENCRYPTION_KEY: ${{ env.LETTA_ENCRYPTION_KEY }}
          # Google Cloud (shared across all test types)
          GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }}
          GOOGLE_CLOUD_LOCATION: ${{ secrets.GOOGLE_CLOUD_LOCATION }}
          # Feature flags (shared across all test types)
          LETTA_ENABLE_BATCH_JOB_POLLING: true
          LETTA_GEMINI_FORCE_MINIMUM_THINKING_BUDGET: true
          LETTA_GEMINI_MAX_RETRIES: 10
          # Pinecone flags
          LETTA_PINECONE_API_KEY: ${{ secrets.LETTA_PINECONE_API_KEY }}
          LETTA_ENABLE_PINECONE: true
          EXA_API_KEY: ${{ env.EXA_API_KEY }}
          # Docker-specific environment variables
          PYTHONPATH: ${{ inputs.use-docker && format('{0}:{1}', github.workspace, env.PYTHONPATH) || '' }}
          LETTA_REDIS_HOST: localhost
        run: |
          set -o xtrace
          # Set LETTA_SERVER_URL only for Docker tests
          if [[ "${{ inputs.use-docker }}" == "true" ]]; then
            export LETTA_SERVER_URL="http://localhost:8083"
          fi
          # Set LLM_CONFIG_FILE only for send-message tests
          if [[ "${{ inputs.test-type }}" == "send-message" ]]; then
            export LLM_CONFIG_FILE="${{ matrix.config_file }}"
          fi
          # Set Ollama base URL only for Ollama tests
          if [[ "${{ inputs.test-type }}" == "integration" && "${{ inputs.runner }}" == *"ollama"* ]]; then
            export LLM_CONFIG_FILE="ollama.json"
            export OLLAMA_BASE_URL="http://localhost:11434"
          fi
          # Set LMStudio base URL only for LMStudio tests
          if [[ "${{ inputs.test-type }}" == "integration" && "${{ inputs.runner }}" == *"lmstudio"* ]]; then
            export LLM_CONFIG_FILE="lmstudio.json"
            export LMSTUDIO_BASE_URL="http://localhost:1234"
          fi
          # Set VLLM base URL only for VLLM tests
          if [[ "${{ inputs.test-type }}" == "integration" && "${{ inputs.runner }}" == *"vllm"* ]]; then
            export LLM_CONFIG_FILE="vllm.json"
            export VLLM_BASE_URL="http://localhost:8000"
          fi
          uv pip install pytest-github-actions-annotate-failures
          # Handle different matrix variable names and test commands based on test type
          # NOTE(review): inputs.test-command is only reached in the final
          # else branch — it is ignored for all named test types.
          if [[ "${{ inputs.test-type }}" == "integration" ]]; then
            uv pip install letta
            uv pip show letta
            uv pip show letta-client
            uv run --frozen pytest -svv ${{ inputs.test-path-prefix }}${{ matrix.test_suite }}
          elif [[ "${{ inputs.test-type }}" == "unit" ]]; then
            uv pip show letta-client
            uv run --frozen pytest -svv ${{ inputs.test-path-prefix }}${{ matrix.test_suite }}
          elif [[ "${{ inputs.test-type }}" == "send-message" ]]; then
            uv run --frozen pytest -s -vv tests/integration_test_send_message.py --maxfail=1 --durations=10
          elif [[ "${{ inputs.test-type }}" == "docker" ]]; then
            uv run --frozen pytest -s tests/test_client.py
          elif [[ "${{ inputs.test-type }}" == "sqlite" ]]; then
            # force sqlite
            unset LETTA_PG_USER
            unset LETTA_PG_PASSWORD
            unset LETTA_PG_DB
            unset LETTA_PG_HOST
            uv pip show letta-client
            uv run alembic upgrade head
            uv run --frozen pytest -svv ${{ inputs.test-path-prefix }}${{ matrix.test_suite }}
          else
            ${{ inputs.test-command }}
          fi
      # Clear persisted sqlite state so later runs on this self-hosted
      # runner start clean.
      - name: Remove sqlite db
        if: ${{ always() && inputs.test-type == 'sqlite' }}
        run: sudo rm -rf ~/.letta || true
      - name: Print docker logs if tests fail
        if: ${{ (failure() || cancelled()) && inputs.use-docker }}
        working-directory: libs/config-core-deploy
        run: |
          echo "Printing Docker Logs..."
          docker compose -f compose.yaml logs
      # Always tear down compose and its persisted volume data.
      - name: Stop docker
        if: ${{ always() && inputs.use-docker }}
        working-directory: libs/config-core-deploy
        run: |
          docker compose -f compose.yaml down --volumes
          sudo rm -rf .persist

View File

@@ -1,157 +0,0 @@
name: Send Message SDK Tests

on:
  pull_request_target:
    # branches: [main] # TODO: uncomment before merge
    types: [labeled]
    paths:
      - 'letta/**'

jobs:
  send-messages:
    # Only run when the "safe to test" label is applied
    if: contains(github.event.pull_request.labels.*.name, 'safe to test')
    runs-on: ubuntu-latest
    timeout-minutes: 15
    strategy:
      fail-fast: false
      matrix:
        config_file:
          - "openai-gpt-4o-mini.json"
          - "azure-gpt-4o-mini.json"
          - "claude-3-5-sonnet.json"
          - "claude-4-sonnet-extended.json"
          - "claude-3-7-sonnet-extended.json"
          - "gemini-pro.json"
          - "gemini-vertex.json"
    services:
      # NOTE(review): no step in this job references qdrant — possibly vestigial.
      qdrant:
        image: qdrant/qdrant
        ports:
          - 6333:6333
      postgres:
        image: pgvector/pgvector:pg17
        ports:
          - 5432:5432
        env:
          POSTGRES_HOST_AUTH_METHOD: trust
          POSTGRES_DB: postgres
          POSTGRES_USER: postgres
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
      redis:
        image: redis:7
        ports:
          - 6379:6379
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 5s
          --health-timeout 5s
          --health-retries 10
    steps:
      # Ensure secrets don't leak
      - name: Configure git to hide secrets
        run: |
          git config --global core.logAllRefUpdates false
          git config --global log.hideCredentials true
      - name: Set up secret masking
        run: |
          # Automatically mask any environment variable ending with _KEY
          for var in $(env | grep '_KEY=' | cut -d= -f1); do
            value="${!var}"
            if [[ -n "$value" ]]; then
              # Mask the full value
              echo "::add-mask::$value"
              # Also mask partial values (first and last several characters)
              # This helps when only parts of keys appear in logs
              if [[ ${#value} -gt 8 ]]; then
                echo "::add-mask::${value:0:8}"
                echo "::add-mask::${value:(-8)}"
              fi
              # Also mask with common formatting changes
              # Some logs might add quotes or other characters
              echo "::add-mask::\"$value\""
              echo "::add-mask::$value\""
              echo "::add-mask::\"$value"
              echo "Masked secret: $var (length: ${#value})"
            fi
          done
      # Check out base repository code, not the PR's code (for security)
      - name: Checkout base repository
        uses: actions/checkout@v4 # No ref specified means it uses base branch
      # Only extract relevant files from the PR (for security, specifically prevent modification of workflow files)
      - name: Extract PR schema files
        run: |
          # Fetch PR without checking it out
          git fetch origin pull/${{ github.event.pull_request.number }}/head:pr-${{ github.event.pull_request.number }}
          # Extract ONLY the schema files
          git checkout pr-${{ github.event.pull_request.number }} -- letta/
      - name: Set up python 3.12
        id: setup-python
        uses: actions/setup-python@v5
        with:
          # NOTE(review): prefer quoting python versions ("3.12") — unquoted
          # values like 3.10 are coerced to the float 3.1 by YAML.
          python-version: 3.12
      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          version: "latest"
      - name: Load cached venv
        id: cached-uv-dependencies
        uses: actions/cache@v4
        with:
          path: .venv
          key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/uv.lock') }}
          restore-keys: |
            venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-
      - name: Install dependencies
        if: steps.cached-uv-dependencies.outputs.cache-hit != 'true'
        shell: bash
        run: uv sync --extra dev --extra postgres --extra external-tools --extra cloud-tool-sandbox --extra google
      - name: Install letta packages
        run: |
          uv run pip install --upgrade letta-client letta
      - name: Migrate database
        env:
          LETTA_PG_PORT: 5432
          LETTA_PG_USER: postgres
          LETTA_PG_PASSWORD: postgres
          LETTA_PG_DB: postgres
          LETTA_PG_HOST: localhost
        run: |
          psql -h localhost -U postgres -d postgres -c 'CREATE EXTENSION vector'
          uv run alembic upgrade head
      - name: Run integration tests for ${{ matrix.config_file }}
        env:
          LLM_CONFIG_FILE: ${{ matrix.config_file }}
          LETTA_PG_PORT: 5432
          LETTA_PG_USER: postgres
          LETTA_PG_PASSWORD: postgres
          LETTA_PG_DB: postgres
          LETTA_PG_HOST: localhost
          LETTA_REDIS_HOST: localhost
          LETTA_REDIS_PORT: 6379
          LETTA_SERVER_PASS: test_server_token
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          AZURE_API_KEY: ${{ secrets.AZURE_API_KEY }}
          AZURE_BASE_URL: ${{ secrets.AZURE_BASE_URL }}
          GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
          COMPOSIO_API_KEY: ${{ secrets.COMPOSIO_API_KEY }}
          DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
          GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }}
          GOOGLE_CLOUD_LOCATION: ${{ secrets.GOOGLE_CLOUD_LOCATION }}
          LETTA_GEMINI_FORCE_MINIMUM_THINKING_BUDGET: true
        run: |
          uv run pytest \
            -s -vv \
            tests/integration_test_send_message.py \
            --maxfail=1 --durations=10

View File

@@ -0,0 +1,48 @@
# Per-model send-message integration tests, delegated to the reusable
# test workflow with a model-config matrix.
name: 🐍🧪 [Core] Send Message SDK Tests

on:
  pull_request:
    branches:
      - main
  pull_request_target:
    branches:
      - main
    types: [labeled]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  send-message-tests:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    with:
      test-type: 'send-message'
      changed-files-pattern: |
        **
        .github/workflows/reusable-test-workflow.yml
        .github/workflows/send-message-integration-tests.yml
      install-args: '--extra dev --extra postgres --extra external-tools --extra cloud-tool-sandbox --extra google --extra redis'
      timeout-minutes: 15
      runner: '["self-hosted", "medium"]'
      ref: ${{ github.event.pull_request.head.sha || github.sha }}
      use-redis: true
      # TODO: "azure-gpt-4o-mini.json" add back later, getting content violation
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "config_file": [
              "openai-gpt-4o-mini.json",
              "claude-4-sonnet-extended.json",
              "claude-3-5-sonnet.json",
              "claude-3-7-sonnet-extended.json",
              "gemini-1.5-pro.json",
              "gemini-2.5-pro.json",
              "gemini-2.5-flash.json"
            ]
          }
        }
    secrets: inherit

47
.github/workflows/test-lmstudio.yml vendored Normal file
View File

@@ -0,0 +1,47 @@
# Runs provider integration tests against a local LMStudio instance on a
# self-hosted GPU runner, delegated to the reusable test workflow.
name: Self-Hosted Provider Integration - LMStudio

on:
  workflow_dispatch:
  # inputs:
  #   ref:
  #     description: 'Git ref to test'
  #     required: false
  #     type: string
  #     default: ${{ github.sha || github.ref || github.event.pull_request.head.sha }}
  pull_request:
    paths:
      - '**'
      - '.github/workflows/test-lmstudio.yml'
      - '.github/workflows/reusable-test-workflow.yml'
  pull_request_target:
    types: [labeled]
    paths:
      - '**'
      - '.github/workflows/test-lmstudio.yml'
      - '.github/workflows/reusable-test-workflow.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  test-lmstudio:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    with:
      test-type: "integration"
      install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google"
      # --frozen respects the lockfile, consistent with the Ollama and vLLM
      # provider workflows and the reusable workflow's default test command.
      test-command: "uv run --frozen pytest -svv tests/"
      timeout-minutes: 60
      runner: '["self-hosted", "gpu", "lmstudio"]'
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "test_suite": [
              "integration_test_send_message.py"
            ]
          }
        }
    secrets: inherit

48
.github/workflows/test-ollama.yml vendored Normal file
View File

@@ -0,0 +1,48 @@
# Runs provider integration tests against a local Ollama instance on a
# self-hosted GPU runner, delegated to the reusable test workflow.
name: Self-Hosted Provider Integration - Ollama

on:
  workflow_dispatch:
  # inputs:
  #   ref:
  #     description: 'Git ref to test'
  #     required: false
  #     type: string
  #     default: ${{ github.sha || github.ref || github.event.pull_request.head.sha }}
  pull_request:
    paths:
      - '**'
      - '.github/workflows/test-ollama.yml'
      - '.github/workflows/reusable-test-workflow.yml'
  pull_request_target:
    types: [labeled]
    paths:
      - '**'
      - '.github/workflows/test-ollama.yml'
      - '.github/workflows/reusable-test-workflow.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  test-ollama:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    with:
      test-type: "integration"
      install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google"
      # NOTE(review): test-command appears unused for test-type "integration";
      # the reusable workflow runs its own pytest invocation for that type.
      test-command: "uv run --frozen pytest -svv tests/"
      timeout-minutes: 60
      runner: '["self-hosted", "gpu", "ollama"]'
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "test_suite": [
              "test_providers.py::test_ollama",
              "integration_test_send_message.py"
            ]
          }
        }
    secrets: inherit

44
.github/workflows/test-vllm.yml vendored Normal file
View File

@@ -0,0 +1,44 @@
# Runs provider integration tests against a local vLLM instance on a
# self-hosted GPU runner, delegated to the reusable test workflow.
name: Self-Hosted Provider Integration - vLLM

on:
  workflow_dispatch:
  # inputs:
  #   ref:
  #     description: 'Git ref to test'
  #     required: false
  #     type: string
  #     default: ${{ github.sha || github.ref || github.event.pull_request.head.sha }}
  pull_request:
    paths:
      - '**'
      - '.github/workflows/test-vllm.yml'
      - '.github/workflows/reusable-test-workflow.yml'
  pull_request_target:
    types: [labeled]
    paths:
      - '**'
      - '.github/workflows/test-vllm.yml'
      - '.github/workflows/reusable-test-workflow.yml'

# Cancel superseded runs for the same PR/branch, consistent with the
# LMStudio and Ollama provider workflows (previously missing here).
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  test-vllm:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    with:
      test-type: "integration"
      install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google"
      # NOTE(review): test-command appears unused for test-type "integration";
      # the reusable workflow runs its own pytest invocation for that type.
      test-command: "uv run --frozen pytest -svv tests/"
      timeout-minutes: 60
      runner: '["self-hosted", "gpu", "vllm"]'
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "test_suite": [
              "test_providers.py::test_vllm",
              "integration_test_send_message.py"
            ]
          }
        }
    secrets: inherit

1
.gitignore vendored
View File

@@ -6,6 +6,7 @@ openapi_letta.json
openapi_openai.json
CLAUDE.md
AGENTS.md
### Eclipse ###
.metadata

View File

@@ -0,0 +1,35 @@
"""add_hidden_property_to_groups_and_blocks
Revision ID: 5b804970e6a0
Revises: ddb69be34a72
Create Date: 2025-09-03 22:19:03.825077
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "5b804970e6a0"
down_revision: Union[str, None] = "ddb69be34a72"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable boolean ``hidden`` column to groups and block."""
    # Identical column definition for both tables; nullable means existing
    # rows need no backfill.
    for table_name in ("groups", "block"):
        op.add_column(table_name, sa.Column("hidden", sa.Boolean(), nullable=True))
def downgrade() -> None:
    """Drop the ``hidden`` column, reversing upgrade() in LIFO order."""
    for table_name in ("block", "groups"):
        op.drop_column(table_name, "hidden")

View File

@@ -0,0 +1,33 @@
"""add build request latency to step metrics
Revision ID: 750dd87faa12
Revises: 5b804970e6a0
Create Date: 2025-09-06 14:28:32.119084
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "750dd87faa12"
down_revision: Union[str, None] = "5b804970e6a0"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add nullable nanosecond-timestamp columns to step_metrics."""
    # Both columns share type and nullability; add them in one pass.
    for ns_column in ("step_start_ns", "llm_request_start_ns"):
        op.add_column("step_metrics", sa.Column(ns_column, sa.BigInteger(), nullable=True))
def downgrade() -> None:
    """Drop the nanosecond-timestamp columns added by upgrade()."""
    for ns_column in ("step_start_ns", "llm_request_start_ns"):
        op.drop_column("step_metrics", ns_column)

View File

@@ -0,0 +1,70 @@
"""Add vector db provider to source
Revision ID: b888f21b151f
Revises: 750dd87faa12
Create Date: 2025-09-08 14:49:58.846429
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "b888f21b151f"
down_revision: Union[str, None] = "750dd87faa12"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a ``vector_db_provider`` column to sources and backfill it.

    Backfill value is PINECONE only when the pinecone package is importable
    AND all pinecone settings are configured; otherwise NATIVE.
    """
    # determine backfill value based on current pinecone settings
    try:
        # Import is only an availability probe; the names are not used.
        from pinecone import IndexEmbed, PineconeAsyncio

        pinecone_available = True
    except ImportError:
        pinecone_available = False

    use_pinecone = all(
        [
            pinecone_available,
            settings.enable_pinecone,
            settings.pinecone_api_key,
            settings.pinecone_agent_index,
            settings.pinecone_source_index,
        ]
    )

    if settings.letta_pg_uri_no_default:
        # PostgreSQL branch: the column is a native enum type.
        # commit required before altering enum in postgresql
        connection = op.get_bind()
        connection.execute(sa.text("COMMIT"))
        connection.execute(sa.text("ALTER TYPE vectordbprovider ADD VALUE IF NOT EXISTS 'PINECONE'"))
        connection.execute(sa.text("COMMIT"))

        # create_type=False: the enum type already exists in the database.
        vectordbprovider = sa.Enum("NATIVE", "TPUF", "PINECONE", name="vectordbprovider", create_type=False)
        op.add_column("sources", sa.Column("vector_db_provider", vectordbprovider, nullable=True))
        if use_pinecone:
            op.execute("UPDATE sources SET vector_db_provider = 'PINECONE' WHERE vector_db_provider IS NULL")
        else:
            op.execute("UPDATE sources SET vector_db_provider = 'NATIVE' WHERE vector_db_provider IS NULL")
        # Tighten to NOT NULL once every row has a value.
        op.alter_column("sources", "vector_db_provider", nullable=False)
    else:
        # SQLite branch: plain string column.
        # NOTE(review): unlike the postgres branch, this column is left
        # nullable — confirm that asymmetry is intentional (likely due to
        # sqlite ALTER limitations).
        op.add_column("sources", sa.Column("vector_db_provider", sa.String(), nullable=True))
        if use_pinecone:
            op.execute("UPDATE sources SET vector_db_provider = 'PINECONE' WHERE vector_db_provider IS NULL")
        else:
            op.execute("UPDATE sources SET vector_db_provider = 'NATIVE' WHERE vector_db_provider IS NULL")
def downgrade() -> None:
    """Drop the column added by upgrade(); works for both DB branches."""
    op.drop_column("sources", "vector_db_provider")
    # enum type remains as postgresql doesn't support removing values

Binary file not shown.

After

Width:  |  Height:  |  Size: 141 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 153 KiB

BIN
fern/assets/favicon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 342 B

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

145
fern/assets/leaderboard.css Normal file
View File

@@ -0,0 +1,145 @@
/* ────────────────────────────────────────────────────────────────
   assets/leaderboard.css (namespaced so it never leaks styles)
   ──────────────────────────────────────────────────────────────── */

/* hide rows that don't match search */
#letta-leaderboard tr.hidden { display: none !important; }

/* clickable, sortable headers */
#letta-leaderboard thead th[data-key] {
  cursor: pointer;
  user-select: none;
  position: relative;
}
/* sort-direction glyphs, anchored to the header's right edge */
#letta-leaderboard thead th.asc::after,
#letta-leaderboard thead th.desc::after {
  position: absolute;
  right: 6px;
  top: 50%;
  transform: translateY(-50%);
  font-size: 10px;
  line-height: 1;
}
#letta-leaderboard thead th.asc::after { content: "▲"; }
#letta-leaderboard thead th.desc::after { content: "▼"; }

/* bar-chart cells */
#letta-leaderboard .bar-cell {
  position: relative;
  padding: 8px;
  overflow: hidden;
}
/* the horizontal bar itself; width is set inline by the JS */
#letta-leaderboard .bar-viz {
  position: absolute;
  left: 0;
  top: 50%;
  transform: translateY(-50%);
  height: 36px;
  z-index: 1;
  max-width: 100%;
  border-radius: 0;
}
/* numeric label overlaid on the bar */
#letta-leaderboard .bar-cell span.value {
  position: absolute;
  left: 5px;
  top: 50%;
  transform: translateY(-50%);
  background: rgba(255, 255, 255, 0.7);
  padding: 0 4px;
  font-size: 14px;
  z-index: 2;
  border-radius: 0;
}
/* warning glyph pinned to the cell's right edge */
#letta-leaderboard .bar-cell span.warn {
  position: absolute;
  right: 5px;
  top: 50%;
  transform: translateY(-50%);
  font-size: 15px;
  line-height: 1;
  color: #dc3545;
  cursor: help;
  z-index: 2;
}

/* bar colours */
#letta-leaderboard .avg .bar-viz { background: rgba(40, 167, 69, 0.35); } /* green */
#letta-leaderboard .cost-ok .bar-viz { background: rgba(255, 193, 7, 0.35); } /* amber */
#letta-leaderboard .cost-high .bar-viz { background: rgba(220, 53, 69, 0.35); } /* red */

/* faint ruler + right border */
#letta-leaderboard .bar-cell::before {
  content: "";
  position: absolute;
  top: 50%;
  left: 0;
  width: 100%;
  height: 8px;
  transform: translateY(-50%);
  pointer-events: none;
  background: repeating-linear-gradient(
    90deg,
    rgba(170, 170, 170, 0.5) 0 1px,
    transparent 1px 25%
  );
}
#letta-leaderboard .bar-cell::after {
  content: "";
  position: absolute;
  top: 50%;
  right: 0;
  width: 1px;
  height: 8px;
  background: rgba(170, 170, 170, 0.5);
  transform: translateY(-50%);
  pointer-events: none;
}

/* table layout tweaks */
#letta-leaderboard tbody tr { height: 50px; }
#letta-leaderboard .metric { width: 32%; }
#letta-leaderboard table { table-layout: fixed; }

/* search box */
#letta-leaderboard #lb-search,
#letta-leaderboard #lb-search:focus {
  border-radius: 0 !important;
  outline: none;
}
/* ───────────────────────────────
Dark-mode overrides
(everything else inherits)
───────────────────────────────*/
/* NOTE(review): this rule relies on native CSS nesting (Chrome 112+,
   Safari 16.5+, Firefox 117+) — confirm target browsers support it,
   or flatten the selectors. */
:is(.dark) #letta-leaderboard {
/* 1. Bar-fill colours — a hair brighter & less transparent */
.avg .bar-viz { background: rgba(56, 189, 98 , .55); } /* green */
.cost-ok .bar-viz { background: rgba(255, 213, 90 , .55); } /* amber */
.cost-high .bar-viz { background: rgba(255, 99 ,132 , .55); } /* red */
/* 2. Ruler + right-edge -- subtle light lines instead of grey */
.bar-cell::before {
background: repeating-linear-gradient(
90deg,
rgba(255,255,255,.12) 0 1px,
transparent 1px 25%
);
}
.bar-cell::after { background: rgba(255,255,255,.12); }
/* 3. Value pill dark background so it doesn't glow */
.bar-cell span.value {
background: rgba(0,0,0,.65);
color: #fff;
}
/* 4. Header text & sort glyphs lighten slightly */
thead th { color:#e2e2e2; }
thead th::after { color:#e2e2e2; }
}
/* 5. Header row background */
:is(.dark) #letta-leaderboard thead {
background:#1a1a1a !important; /* pick any dark tone */
}

153
fern/assets/leaderboard.js Normal file
View File

@@ -0,0 +1,153 @@
/* ──────────────────────────────────────────────────────────
assets/leaderboard.js
Load via docs.yml → js: - path: assets/leaderboard.js
(strategy: lazyOnload is fine)
────────────────────────────────────────────────────────── */
import yaml from 'https://cdn.jsdelivr.net/npm/js-yaml@4.1.0/+esm';
console.log('🏁 leaderboard.js loaded on', location.pathname);
// Costs above this cap (USD) render a clipped full-width bar plus a warning icon.
const COST_CAP = 20;
/* ---------- helpers ---------- */
// pct: format a numeric score to 3 significant digits with a trailing '%'.
const pct = (v) => Number(v).toPrecision(3) + '%';
// cost: format a numeric dollar amount to 2 decimal places with a leading '$'.
const cost = (v) => '$' + Number(v).toFixed(2);
// ready: invoke cb immediately if the DOM is already parsed, else on DOMContentLoaded.
const ready = (cb) =>
document.readyState === 'loading'
? document.addEventListener('DOMContentLoaded', cb)
: cb();
/* ---------- main ---------- */
ready(async () => {
// const host = document.getElementById('letta-leaderboard');
// if (!host) {
// console.warn('LB-script: #letta-leaderboard not found - bailing out.');
// return;
// }
/* ---- wait for the leaderboard container to appear (SPA nav safe) ---- */
// Resolves with the container element, or rejects after 5s; the .catch below
// converts a timeout into a null host so the script exits quietly.
const host = await new Promise((resolve, reject) => {
const el = document.getElementById('letta-leaderboard');
if (el) return resolve(el); // SSR / hard refresh path
const obs = new MutationObserver(() => {
const found = document.getElementById('letta-leaderboard');
if (found) {
obs.disconnect();
resolve(found); // CSR navigation path
}
});
obs.observe(document.body, { childList: true, subtree: true });
setTimeout(() => {
obs.disconnect();
reject(new Error('#letta-leaderboard never appeared'));
}, 5000); // safety timeout
}).catch((err) => {
console.warn('LB-script:', err.message);
return null;
});
if (!host) return; // still no luck → give up
/* ----- figure out URL of data.yaml ----- */
// const path = location.pathname.endsWith('/')
// ? location.pathname
// : location.pathname.replace(/[^/]*$/, ''); // strip file/slug
// const dataUrl = `${location.origin}${path}data.yaml`;
// const dataUrl = `${location.origin}/leaderboard/data.yaml`; // one-liner, always right
// const dataUrl = `${location.origin}/assets/leaderboard.yaml`;
// const dataUrl = `./assets/leaderboard.yaml`; // one-liner, always right
// const dataUrl = `${location.origin}/data.yaml`; // one-liner, always right
// const dataUrl = 'https://raw.githubusercontent.com/letta-ai/letta-leaderboard/main/data/letta_memory_leaderboard.yaml';
const dataUrl =
'https://cdn.jsdelivr.net/gh/letta-ai/letta-leaderboard@latest/data/letta_memory_leaderboard.yaml';
console.log('LB-script: fetching', dataUrl);
/* ----- fetch & parse YAML ----- */
// assumes the YAML parses to an array of rows with model/average/total_cost
// fields — TODO confirm against the letta-leaderboard repo schema
let rows;
try {
const resp = await fetch(dataUrl);
console.log(`LB-script: status ${resp.status}`);
if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
rows = yaml.load(await resp.text());
} catch (err) {
console.error('LB-script: failed to load YAML →', err);
return;
}
/* ----- wire up table ----- */
// dir maps column key → current sort direction ('asc' | 'desc').
const dir = Object.create(null);
const tbody = document.getElementById('lb-body');
const searchI = document.getElementById('lb-search');
const headers = document.querySelectorAll('#lb-table thead th[data-key]');
searchI.value = ''; // clear any persisted filter
// render: rebuild all table rows; rows not matching the search query are
// tagged with the 'hidden' class rather than omitted.
// NOTE(review): r.model is interpolated into innerHTML unescaped — safe only
// while the YAML source is trusted first-party data; escape if that changes.
const render = () => {
const q = searchI.value.toLowerCase();
tbody.innerHTML = rows
.map((r) => {
const over = r.total_cost > COST_CAP;
const barW = over ? '100%' : (r.total_cost / COST_CAP) * 100 + '%';
const costCls = over ? 'cost-high' : 'cost-ok';
const warnIcon = over
? `<span class="warn" title="Cost exceeds $${COST_CAP} cap - bar is clipped to full width">⚠</span>`
: '';
return `
<tr class="${q && !r.model.toLowerCase().includes(q) ? 'hidden' : ''}">
<td style="padding:8px">${r.model}</td>
<td class="bar-cell avg metric">
<div class="bar-viz" style="width:${r.average}%"></div>
<span class="value">${pct(r.average)}</span>
</td>
<td class="bar-cell ${costCls} metric">
<div class="bar-viz" style="width:${barW}"></div>
<span class="value">${cost(r.total_cost)}</span>
${warnIcon}
</td>
</tr>`;
})
.join('');
};
// setIndicator: show the asc/desc glyph only on the actively sorted column.
const setIndicator = (activeKey) => {
headers.forEach((h) => {
h.classList.remove('asc', 'desc');
if (h.dataset.key === activeKey) h.classList.add(dir[activeKey]);
});
};
/* initial sort ↓ */
dir.average = 'desc';
rows.sort((a, b) => b.average - a.average);
setIndicator('average');
render();
/* search */
searchI.addEventListener('input', render);
/* column sorting */
headers.forEach((th) => {
const key = th.dataset.key;
th.addEventListener('click', () => {
// Clicking a desc-sorted column toggles to asc; any other state sorts desc.
const asc = dir[key] === 'desc';
dir[key] = asc ? 'asc' : 'desc';
rows.sort((a, b) => {
const va = a[key],
vb = b[key];
const cmp =
typeof va === 'number'
? va - vb
: String(va).localeCompare(String(vb));
return asc ? cmp : -cmp;
});
setIndicator(key);
render();
});
});
});

16
fern/assets/logo-dark.svg Normal file
View File

@@ -0,0 +1,16 @@
<svg width="75" height="22" viewBox="0 0 75 22" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_80_2)">
<path d="M13.2017 8.80036H8.80133V13.2002H13.2017V8.80036Z" fill="white"/>
<path d="M17.6019 2.99742V0H4.40033V2.99742C4.40033 3.77228 3.77267 4.39988 2.99773 4.39988H0V17.6001H2.99773C3.77267 17.6001 4.40033 18.2277 4.40033 19.0026V22H17.6019V19.0026C17.6019 18.2277 18.2296 17.6001 19.0045 17.6001H22.0023V4.39988H19.0045C18.2296 4.39988 17.6019 3.77228 17.6019 2.99742ZM17.6019 16.1971C17.6019 16.9719 16.9743 17.5995 16.1993 17.5995H5.80355C5.0286 17.5995 4.40094 16.9719 4.40094 16.1971V5.80234C4.40094 5.02747 5.0286 4.39988 5.80355 4.39988H16.1993C16.9743 4.39988 17.6019 5.02747 17.6019 5.80234V16.1971Z" fill="white"/>
<path d="M34.9429 4.39986H33.0025V17.5995H41.6265V15.7326H34.9429V4.39986Z" fill="white"/>
<path d="M47.221 8.28637H46.531C44.4567 8.28637 42.3641 9.55806 42.3641 12.3984V13.7789C42.3641 16.3534 43.8541 17.8909 46.3495 17.8909H47.4031C49.5085 17.8909 51.0065 16.6516 51.3139 14.6558L51.3408 14.4798H49.3423L49.3093 14.5886C49.0135 15.5676 48.2404 16.024 46.8763 16.024C45.1058 16.024 44.2703 15.2376 44.2501 13.5503H51.3878V12.3984C51.3878 9.55806 49.2952 8.28637 47.221 8.28637ZM44.3076 11.9004C44.5056 10.6623 45.2628 10.1533 46.8757 10.1533C48.4885 10.1533 49.2451 10.6623 49.4431 11.9004H44.3076Z" fill="white"/>
<path d="M55.2595 4.39986H53.3197V8.28642H52.0302V10.1533H53.3197V13.851C53.3197 17.1124 55.3042 17.5995 56.4874 17.5995H57.7115V15.7326H57.0142C55.768 15.7326 55.2595 15.1032 55.2595 13.5608V10.1539H57.7115V8.28703H55.2595V4.39986Z" fill="white"/>
<path d="M61.815 4.39986H59.8751V8.28642H58.5856V10.1533H59.8751V13.851C59.8751 17.1124 61.8596 17.5995 63.0428 17.5995H64.2669V15.7326H63.5696C62.3234 15.7326 61.815 15.1032 61.815 13.5608V10.1539H64.2669V8.28703H61.815V4.39986Z" fill="white"/>
<path d="M74.2617 15.7326C73.8772 15.7326 73.7061 15.5724 73.7061 15.2131V12.0348C73.7061 8.77341 71.7217 8.28637 70.5385 8.28637H68.7588C67.2199 8.28637 65.5728 9.41323 65.5728 11.0907V11.2435H67.5126V11.0907C67.5126 10.5737 68.1452 10.1539 68.922 10.1539H70.0117C71.4039 10.1539 71.7046 10.655 71.7602 11.7739H68.958C66.7915 11.7739 65.3363 12.9301 65.3363 14.6509V14.8507C65.3363 15.7594 65.6889 17.8732 68.958 17.8732C69.7929 17.8732 71.2517 17.7272 72.0364 16.7959C72.5119 17.6007 73.5136 17.6007 74.2617 17.6007H74.4144V15.7338H74.2617V15.7326ZM71.7657 14.7407C71.7657 15.7778 70.1192 16.0045 69.4842 16.0045C67.6367 16.0045 67.2755 15.5541 67.2755 14.7768C67.2755 13.9139 68.0395 13.4581 69.4842 13.4581H71.7657V14.7407Z" fill="white"/>
</g>
<defs>
<clipPath id="clip0_80_2">
<rect width="75" height="22" fill="white"/>
</clipPath>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 2.7 KiB

View File

@@ -0,0 +1,9 @@
<svg aria-hidden="true" fill="none" height="100%" preserveaspectratio="xMidYMid meet" role="img" viewBox="0 0 75 22" width="100%" xmlns="http://www.w3.org/2000/svg">
<path d="M13.2017 8.80036H8.80133V13.2002H13.2017V8.80036Z" fill="currentColor"></path>
<path d="M17.6019 2.99742V0H4.40033V2.99742C4.40033 3.77228 3.77267 4.39988 2.99773 4.39988H0V17.6001H2.99773C3.77267 17.6001 4.40033 18.2277 4.40033 19.0026V22H17.6019V19.0026C17.6019 18.2277 18.2296 17.6001 19.0045 17.6001H22.0023V4.39988H19.0045C18.2296 4.39988 17.6019 3.77228 17.6019 2.99742ZM17.6019 16.1971C17.6019 16.9719 16.9743 17.5995 16.1993 17.5995H5.80355C5.0286 17.5995 4.40094 16.9719 4.40094 16.1971V5.80234C4.40094 5.02747 5.0286 4.39988 5.80355 4.39988H16.1993C16.9743 4.39988 17.6019 5.02747 17.6019 5.80234V16.1971Z" fill="currentColor"></path>
<path d="M34.9429 4.39986H33.0025V17.5995H41.6265V15.7326H34.9429V4.39986Z" fill="currentColor"></path>
<path d="M47.221 8.28637H46.531C44.4567 8.28637 42.3641 9.55806 42.3641 12.3984V13.7789C42.3641 16.3534 43.8541 17.8909 46.3495 17.8909H47.4031C49.5085 17.8909 51.0065 16.6516 51.3139 14.6558L51.3408 14.4798H49.3423L49.3093 14.5886C49.0135 15.5676 48.2404 16.024 46.8763 16.024C45.1058 16.024 44.2703 15.2376 44.2501 13.5503H51.3878V12.3984C51.3878 9.55806 49.2952 8.28637 47.221 8.28637ZM44.3076 11.9004C44.5056 10.6623 45.2628 10.1533 46.8757 10.1533C48.4885 10.1533 49.2451 10.6623 49.4431 11.9004H44.3076Z" fill="currentColor"></path>
<path d="M55.2595 4.39986H53.3197V8.28642H52.0302V10.1533H53.3197V13.851C53.3197 17.1124 55.3042 17.5995 56.4874 17.5995H57.7115V15.7326H57.0142C55.768 15.7326 55.2595 15.1032 55.2595 13.5608V10.1539H57.7115V8.28703H55.2595V4.39986Z" fill="currentColor"></path>
<path d="M61.815 4.39986H59.8751V8.28642H58.5856V10.1533H59.8751V13.851C59.8751 17.1124 61.8596 17.5995 63.0428 17.5995H64.2669V15.7326H63.5696C62.3234 15.7326 61.815 15.1032 61.815 13.5608V10.1539H64.2669V8.28703H61.815V4.39986Z" fill="currentColor"></path>
<path d="M74.2617 15.7326C73.8772 15.7326 73.7061 15.5724 73.7061 15.2131V12.0348C73.7061 8.77341 71.7217 8.28637 70.5385 8.28637H68.7588C67.2199 8.28637 65.5728 9.41323 65.5728 11.0907V11.2435H67.5126V11.0907C67.5126 10.5737 68.1452 10.1539 68.922 10.1539H70.0117C71.4039 10.1539 71.7046 10.655 71.7602 11.7739H68.958C66.7915 11.7739 65.3363 12.9301 65.3363 14.6509V14.8507C65.3363 15.7594 65.6889 17.8732 68.958 17.8732C69.7929 17.8732 71.2517 17.7272 72.0364 16.7959C72.5119 17.6007 73.5136 17.6007 74.2617 17.6007H74.4144V15.7338H74.2617V15.7326ZM71.7657 14.7407C71.7657 15.7778 70.1192 16.0045 69.4842 16.0045C67.6367 16.0045 67.2755 15.5541 67.2755 14.7768C67.2755 13.9139 68.0395 13.4581 69.4842 13.4581H71.7657V14.7407Z" fill="currentColor"></path>
</svg>

After

Width:  |  Height:  |  Size: 2.7 KiB

307
fern/assets/styles.css Normal file
View File

@@ -0,0 +1,307 @@
/* .fern-header-container * {
font-weight: 600;
} */
/* Remove rounded corners across the docs site */
:root {
--radius: 0px;
}
/* Override styles related to soft borders */
.fern-button {
border-radius: 0 !important;
}
.fern-collapsible-card {
border-radius: 0 !important;
}
.fern-api-property-meta code {
border-radius: 0 !important;
}
.fern-docs-badge {
border-radius: 0 !important;
}
.bg-accent-highlight {
border-radius: 0 !important;
}
.fern-scroll-area {
border-radius: 0 !important;
}
.fern-dropdown-item {
border-radius: 0 !important;
}
.fern-anchor-icon {
border-radius: 0 !important;
}
.fern-search-bar {
border-radius: 0 !important;
}
.keyboard-shortcut-hint {
border-radius: 0 !important;
}
.fern-search-button {
border-radius: 0 !important;
}
code:not(.code-block) {
border-radius: 0 !important;
}
.fern-accordion {
border-radius: 0 !important;
}
.fern-table-root,
.fern-table,
.fern-table thead,
.fern-table tbody,
.fern-table tr,
.fern-table th,
.fern-table td {
border-radius: 0 !important;
}
/* [data-radix-scroll-area-viewport] {
border-radius: 0 !important;
}
[data-radix-popper-content-wrapper] {
border-radius: 0 !important;
} */
[data-radix-popper-content-wrapper],
[data-radix-popper-content-wrapper] > * {
border-radius: 0 !important;
}
.rounded-xl,
.rounded-lg,
.rounded-md,
.rounded-sm,
.fern-sidebar-link {
border-radius: 0px !important;
}
:is(.light) .code-block-line-content span[style*="color: rgb(194, 195, 197);"] {
color: #8e8e8e !important;
}
/* Different opacity for active items in the sidebar */
/* Light mode */
:is(.light) .fern-sidebar-link-container[data-state="active"] .fern-sidebar-link {
background-color: rgba(7, 7, 172, 0.04);
}
:is(.light) body#fern-docs .fern-sidebar-link[data-state="active"] {
background-color: rgba(7, 7, 172, 0.04);
}
:is(.light) .fern-sidebar-link-container[data-state="active"] .fern-sidebar-link-text {
color: #0707ac;
}
:is(.light) body#fern-docs .fern-sidebar-link[data-state="active"] span {
color: #0707ac;
}
/* Dark mode */
:is(.dark) .fern-sidebar-link-container[data-state="active"] .fern-sidebar-link {
background-color: rgba(255, 187, 173, 0.08); /* #FFBBAD */
}
:is(.dark) body#fern-docs .fern-sidebar-link[data-state="active"] {
background-color: rgba(255, 187, 173, 0.08); /* #FFBBAD */
}
:is(.dark) .fern-sidebar-link-container[data-state="active"] .fern-sidebar-link-text {
color: #FF5533;
}
:is(.dark) body#fern-docs .fern-sidebar-link[data-state="active"] span {
color: #FF5533;
}
/* Make uppercase sidebar heading */
.fern-sidebar-heading .fern-sidebar-heading-content,
.fern-breadcrumb-item {
/* font-family: var(--typography-code-font-family); */
font-weight: 600;
/* letter-spacing: 0.05em; */
text-transform: uppercase;
/* color: var(--gray-12); */
font-size: 0.8rem;
/* text-decoration: none; */
}
/* .fern-theme-default.fern-container .fern-header-tabs .fern-header-tab-button .fern-header-container * {
font-size: 1rem;
} */
.t-muted.whitespace-nowrap.text-xs,
.inline-flex.items-baseline.gap-1 {
display: none !important;
}
/* @supports (overscroll-behavior: none) {
html, body {
overscroll-behavior: none;
}
} */
/* dark/light mode toggle for images */
:is(.dark) img.dark {
display: block;
}
:is(.dark) img.light {
display: none;
}
:is(.light) img.light {
display: block;
}
:is(.light) img.dark {
display: none;
}
/* Landing page styles */
.landing-page {
margin-inline: auto;
min-width: calc(var(--spacing) * 0);
padding-inline: var(--page-padding);
max-width: calc(var(--spacing-page-width) + var(--spacing-page-padding)*2);
.letta-header {
padding-top: 7rem !important;
padding-bottom: 7rem !important;
position: relative !important;
}
.letta-header-bg {
background-color: #f6f6f6 !important;
width: 100vw;
position: absolute;
top: 0%;
bottom: 0%;
left: 50%;
transform: translate(-50%);
z-index: -1;
}
.hero-image-container {
width: var(--page-width);
position: relative;
}
.hero-image {
position: absolute !important;
right: 0 !important;
top: 50% !important;
transform: translateY(-50%) !important;
height: 100% !important;
max-height: 400px !important;
z-index: 0 !important;
opacity: 0.5 !important;
width: fit-content;
pointer-events: none !important;
}
.hero-image.dark {
display: none !important;
}
.letta-header h1 {
font-size: 4.0rem !important;
line-height: 1.1 !important;
font-weight: 300 !important;
font-family: Roobert, sans-serif !important; /* Use regular Roobert instead of Medium */
}
.letta-header p {
font-size: 1.25rem !important;
line-height: 1.3 !important;
font-weight: 400 !important;
}
.letta-header a {
border-bottom: 1px solid rgba(255,255,255,0.5) !important;
font-size: 0.5rem !important;
font-weight: normal !important;
}
.letta-header a:hover {
border-bottom-color: white !important;
}
.fern-main .landingbody {
max-width: 1195px !important;
margin-left: auto !important;
margin-right: auto !important;
}
#fern-sidebar {
display: none !important;
}
@media (max-width: 1504px) {
.hero-image-container {
width: 100vw !important;
}
}
/* Tablet viewport breakpoint */
@media (max-width: 1024px) {
.letta-header {
padding-top: 4rem !important;
padding-bottom: 4rem !important;
}
.letta-header h1 {
font-size: 3rem !important;
}
.letta-header p {
font-size: 1.1rem !important;
}
.hero-image-container {
display: none !important;
}
}
/* Mobile viewport breakpoint */
@media (max-width: 640px) {
.letta-header {
padding-top: 3rem !important;
padding-bottom: 3rem !important;
}
.letta-header h1 {
font-size: 2.5rem !important;
}
.letta-header p {
font-size: 1rem !important;
}
.letta-header .max-w-4xl {
padding-left: 1rem !important;
padding-right: 1rem !important;
}
.landingbody {
padding-left: 1rem !important;
padding-right: 1rem !important;
}
}
}
/* Dark-mode overrides for the landing page */
:is(.dark) .landing-page .letta-header-bg {
background-color: #151515 !important;
}
/* Fixed selector: was `.landing-page.hero-image.light` (no descendant
   combinator), which only matched an element carrying BOTH classes and
   therefore never hid the light hero image in dark mode. The sibling rule
   below (`.landing-page .hero-image.dark`) shows the intended pattern. */
:is(.dark) .landing-page .hero-image.light {
display: none !important;
}
:is(.dark) .landing-page .hero-image.dark {
display: block !important;
}

View File

@@ -0,0 +1,72 @@
## Consistency Across Messages APIs
<Note> These are the final changes from our API overhaul, which means they are not backwards compatible to prior versions of our APIs and SDKs. Upgrading may require changes to your code. </Note>
### Flattened `UserMessage` content
The content field on `UserMessage` objects returned by our Messages endpoints has been simplified to a flat string containing raw message text, rather than a JSON string with message text nested inside.
#### Before:
```python
{
"id": "message-dea2ceab-0863-44ea-86dc-70cf02c05946",
"date": "2025-01-28T01:18:18+00:00",
"message_type": "user_message",
"content": "{\n \"type\": \"user_message\",\n \"message\": \"Hello, how are you?\",\n \"time\": \"2025-01-28 01:18:18 AM UTC+0000\"\n}"
}
```
#### After:
```python
{
"id": "message-dea2ceab-0863-44ea-86dc-70cf02c05946",
"date": "2025-01-28T01:18:18+00:00",
"message_type": "user_message",
"content": "Hello, how are you?"
}
```
### Top-level `use_assistant_message` parameter defaults to True
All message related APIs now include a top-level `use_assistant_message` parameter, which defaults to `True` if not specified. This parameter controls whether the endpoint should parse specific tool call arguments (default `send_message`) as AssistantMessage objects rather than ToolCallMessage objects.
#### Before:
```python
response = client.agents.messages.create(
agent_id=agent.id,
messages=[
MessageCreate(
role="user",
content="call the big_return function",
),
],
config=LettaRequestConfig(use_assistant_message=False),
)
```
#### After:
```python
response = client.agents.messages.create(
agent_id=agent.id,
messages=[
MessageCreate(
role="user",
content="call the big_return function",
),
],
use_assistant_message=False,
)
```
Previously, the `List Messages` endpoint defaulted to False internally, so this change may cause unexpected behavior in your code. To fix this, you can set the `use_assistant_message` parameter to `False` in your request.
```python
messages = client.agents.messages.list(
limit=10,
use_assistant_message=False,
)
```
### Consistent message return type
All message related APIs return `LettaMessage` objects now, which are simplified versions of `Message` objects stored in the database backend. Previously, our `List Messages` endpoint returned `Message` objects by default, which is no longer an option.

View File

@@ -0,0 +1,22 @@
### Tool rules improvements
ToolRule objects no longer should specify a `type` at instantiation, as this field is now immutable.
#### Before:
```python
rule = InitToolRule(
tool_name="secret_message",
type="run_first"
)
```
#### After:
```python
rule = InitToolRule(tool_name="secret_message")
```
Letta also now supports smarter retry behavior for tool rules in the case of unrecoverable failures.
### New API routes to query agent steps
The [`List Steps`](https://docs.letta.com/api-reference/steps/list-steps) and [`Retrieve Step`](https://docs.letta.com/api-reference/steps/retrieve-step) routes have been added to enable querying for additional metadata around agent execution.

View File

@@ -0,0 +1,42 @@
### Query tools by name
The `List Tools` API now supports querying by tool name.
```python
send_message_tool_id = client.agents.tools.list(tool_name="secret_message")[0].id
```
### Authorization header now supports password
For self-deployed instances of Letta that are password-protected, the `Authorization` header now supports parsing passwords in addition to API keys. `X-BARE-PASSWORD` will still be supported as legacy, but will be deprecated in a future release.
#### Before:
```sh
curl --request POST \
--url https://MYSERVER.up.railway.app/v1/agents/ \
--header 'X-BARE-PASSWORD: password banana' \
--header 'Content-Type: application/json' \
--data '{
...
}'
```
#### After:
```sh
curl --request POST \
--url https://MYSERVER.up.railway.app/v1/agents/ \
--header 'AUTHORIZATION: Bearer banana' \
--header 'Content-Type: application/json' \
--data '{
...
}'
```
Password can now be passed via the `token` field when initializing the Letta client:
```python
client = LettaClient(
base_url="https://MYSERVER.up.railway.app",
token="banana",
)
```

View File

@@ -0,0 +1,11 @@
## Agents API Improvements
<Note> These APIs are only available for Letta Cloud. </Note>
### Agent Search
The [`/v1/agents/search`](https://docs.letta.com/api-reference/agents/search) API has been updated to support pagination via `after` query parameter
### Agent Creation from Template
The [`/v1/templates/`](https://docs.letta.com/api-reference/templates/createagentsfromtemplate) creation API has been updated to support adding `tags` at creation time

View File

@@ -0,0 +1,3 @@
## Temperature and Max Tokens Supported via LLM Config
These values are now configurable when creating and modifying agents via [`llm_config`](https://docs.letta.com/api-reference/agents/modify#request.body.llm_config) parameter for subsequent LLM requests.

View File

@@ -0,0 +1,9 @@
## New Features
### Google Vertex support
Google Vertex is now a supported endpoint type for Letta agents.
### Option to disable message persistence for a given agent
Letta agents now have an optional `message_buffer_autoclear` flag. If set to True (default False), the message history will not be persisted in-context between requests (though the agent will still have access to core, archival, and recall memory).

View File

@@ -0,0 +1,113 @@
## Project Slug Moved to Request Header
<Note> Projects are only available for Letta Cloud. </Note>
Project slug can now be specified via request header `X-Project` for agent creation. The existing `project` parameter will soon be deprecated.
#### Before
<CodeBlocks>
```curl title="curl"
curl -X POST https://app.letta.com/v1/agents \
-H 'Content-Type: application/json' \
-H 'Authorization: Bearer YOUR_API_KEY' \
-d '{
"project": "YOUR_PROJECT_SLUG",
"model": "gpt-4o-mini",
"embedding": "openai/text-embedding-3-small",
"memory_blocks": [
{
"label": "human",
"value": "name: Caren"
}
]
}'
```
```python title="python"
from letta_client import CreateBlock, Letta
client = Letta(
token="YOUR_API_KEY",
)
agent = client.agents.create(
project="YOUR_PROJECT_SLUG",
model="gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[
CreateBlock(
label="human",
value="name: Caren",
),
],
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agent = await client.agents.create({
project: "YOUR_PROJECT_SLUG",
model: "gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
memory_blocks: [
{
label: "human",
value: "name: Caren"
},
],
});
```
</CodeBlocks>
#### After
<CodeBlocks>
```curl title="curl"
curl -X POST https://app.letta.com/v1/agents \
-H 'Content-Type: application/json' \
-H 'Authorization: Bearer YOUR_API_KEY' \
-H 'X-Project: YOUR_PROJECT_SLUG' \
-d '{
"model": "gpt-4o-mini",
"embedding": "openai/text-embedding-3-small",
"memory_blocks": [
{
"label": "human",
"value": "name: Caren"
}
]
}'
```
```python title="python"
from letta_client import CreateBlock, Letta
client = Letta(
token="YOUR_API_KEY",
)
agent = client.agents.create(
x_project="YOUR_PROJECT_SLUG",
model="gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[
CreateBlock(
label="human",
value="name: Caren",
),
],
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agent = await client.agents.create({
x_project: "YOUR_PROJECT_SLUG",
model: "gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
memory_blocks: [
{
label: "human",
value: "name: Caren"
},
],
});
```
</CodeBlocks>

View File

@@ -0,0 +1,7 @@
## New Identities Feature
We've added a new Identities feature that helps you manage users in your multi-user Letta application. Each Identity can represent a user or organization in your system and store their metadata.
You can associate an Identity with one or more agents, making it easy to track which agents belong to which users. Agents can also be associated with multiple identities, enabling shared access across different users. This release includes full CRUD (Create, Read, Update, Delete) operations for managing Identities through our API.
For more information on usage, visit our [Identities documentation](/api-reference/identities) and [usage guide](/guides/agents/multi-user).

View File

@@ -0,0 +1,85 @@
## Core Memory and Archival Memory SDK APIs Renamed to Blocks and Passages
<Note> This is a breaking SDK change and is not backwards compatible. </Note>
Given the confusion around our advanced functionality for managing memory, we've renamed the Core Memory SDK API to `blocks` and the Archival Memory SDK API to `passages` so that our API naming reflects the unit of memory stored. This change only affects our SDK, and does not affect Letta's Rest API.
#### Before
<CodeBlocks>
```python title="python"
from letta_client import CreateBlock, Letta
client = Letta(
token="YOUR_API_KEY",
)
agent = client.agents.create(
model="gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[
CreateBlock(
label="human",
value="name: Caren",
),
],
)
blocks = client.agents.core_memory.list_blocks(agent_id=agent.id)
client.agents.core_memory.detach_block(agent_id=agent.id, block_id=blocks[0].id)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agent = await client.agents.create({
model: "gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
memory_blocks: [
{
label: "human",
value: "name: Caren"
},
],
});
const blocks = await client.agents.coreMemory.listBlocks(agent.id);
await client.agents.coreMemory.detachBlock(agent.id, blocks[0].id);
```
</CodeBlocks>
#### After
<CodeBlocks>
```python title="python"
from letta_client import CreateBlock, Letta
client = Letta(
token="YOUR_API_KEY",
)
agent = client.agents.create(
model="gpt-4o-mini",
embedding="openai/text-embedding-3-small",
memory_blocks=[
CreateBlock(
label="human",
value="name: Caren",
),
],
)
blocks = client.agents.blocks.list(agent_id=agent.id)
client.agents.blocks.detach(agent_id=agent.id, block_id=blocks[0].id)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agent = await client.agents.create({
model: "gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
memory_blocks: [
{
label: "human",
value: "name: Caren"
},
],
});
const blocks = client.agents.blocks.list(agent.id)
await client.agents.blocks.detach(agent.id, blocks[0].id)
```
</CodeBlocks>

View File

@@ -0,0 +1,3 @@
## xAI / Grok Now Supported
We've added xAI support in the latest SDK version. To enable xAI models, set your `XAI_API_KEY` as an environment variable: `export XAI_API_KEY="..."`.

View File

@@ -0,0 +1,28 @@
## Added Modify Passage API
We've introduced a new API endpoint that allows you to modify existing passages within agent memory.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
client.agents.modify_passage(
agent_id="AGENT_ID",
memory_id="MEMORY_ID",
text="Updated passage content"
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
await client.agents.modifyPassage({
agent_id: "AGENT_ID",
memory_id: "MEMORY_ID",
text: "Updated passage content"
});
```
</CodeBlocks>

View File

@@ -0,0 +1,77 @@
## Enhanced Tool Definitions with Complex Schemas
### Complex Schema Support for Tool Arguments
You can now use complex Pydantic schemas to define arguments for tools, enabling better type safety and validation for your tool inputs.
```python
from pydantic import BaseModel
from typing import List, Optional
class ItemData(BaseModel):
name: str
sku: str
price: float
description: Optional[str] = None
class InventoryEntry(BaseModel):
item: ItemData
location: str
current_stock: int
minimum_stock: int = 5
class InventoryEntryData(BaseModel):
data: InventoryEntry
quantity_change: int
```
## Tool Creation from Function with Complex Schema
Use the args_schema parameter to specify a Pydantic model for tool arguments when creating tools from functions.
```python
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
def manage_inventory_mock(data: InventoryEntry, quantity_change: int) -> bool:
"""
Implementation of the manage_inventory tool
"""
print(f"Updated inventory for {data.item.name} with a quantity change of {quantity_change}")
return True
tool_from_func = client.tools.upsert_from_function(
func=manage_inventory_mock,
args_schema=InventoryEntryData,
)
```
### BaseTool Class Extension
For more complex tool implementations, you can also extend the `BaseTool` class to create custom tools with full control over the implementation.
```python
from letta_client import BaseTool
from typing import Type, List
from pydantic import BaseModel
class ManageInventoryTool(BaseTool):
name: str = "manage_inventory"
args_schema: Type[BaseModel] = InventoryEntryData
description: str = "Update inventory catalogue with a new data entry"
tags: List[str] = ["inventory", "shop"]
def run(self, data: InventoryEntry, quantity_change: int) -> bool:
"""
Implementation of the manage_inventory tool
"""
# implementation
print(f"Updated inventory for {data.item.name} with a quantity change of {quantity_change}")
return True
custom_tool = client.tools.add(
tool=ManageInventoryTool(),
)
```

View File

@@ -0,0 +1,29 @@
## Added List Run Steps API
We've introduced a new API endpoint that allows you to list all steps associated with a specific run. This feature makes it easier to track and analyze the sequence of steps performed during a run.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
steps = client.runs.list_run_steps(
run_id="RUN_ID",
)
for step in steps:
print(f"Step ID: {step.id}, Tokens: {step.total_tokens}")
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const steps = await client.runs.listRunSteps({
run_id: "RUN_ID",
});
steps.forEach(step => {
console.log(`Step ID: ${step.id}, Tokens: ${step.total_tokens}`);
});
```
</CodeBlocks>

View File

@@ -0,0 +1,60 @@
## Agent Serialization: Download and Upload APIs
We've added new APIs that allow you to download an agent's serialized JSON representation and upload it to recreate the agent in the system. These features enable easy agent backup, transfer between environments, and version control of agent configurations.
### Import Agent Serialized
Import a serialized agent file and recreate the agent in the system.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
agent = client.agents.import_agent_serialized(
file=open("/path/to/agent/file.af", "rb"),
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
import * as fs from 'fs';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agent = await client.agents.importAgentSerialized({
file: fs.createReadStream("/path/to/your/file"),
});
```
</CodeBlocks>
### Export Agent Serialized
Export the serialized JSON representation of an agent, formatted with indentation.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
agent_json = client.agents.export_agent_serialized(
agent_id="AGENT_ID",
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agentJson = await client.agents.exportAgentSerialized({
agent_id: "AGENT_ID",
});
```
</CodeBlocks>
## Use Cases
- Environment Migration: Transfer agents between local, desktop, and cloud environments
- Version Control: Save agent configurations before making significant changes
- Templating: Create template agents that can be quickly deployed for different use cases
- Sharing: Share agent configurations with team members or across organizations

View File

@@ -0,0 +1,32 @@
## Message Modification API
We've added a new API endpoint that allows you to modify existing messages in an agent's conversation history. This feature is particularly useful for editing message history to refine agent behavior without starting a new conversation.
<CodeBlocks>
```python title="python"
from letta_client import Letta, UpdateSystemMessage
client = Letta(
token="YOUR_API_KEY",
)
client.agents.messages.modify(
agent_id="AGENT_ID",
message_id="MESSAGE_ID",
request=UpdateSystemMessage(
content="The agent should prioritize brevity in responses.",
),
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
await client.agents.messages.modify({
agent_id: "AGENT_ID",
message_id: "MESSAGE_ID",
request: {
content: "The agent should prioritize brevity in responses."
}
});
```
</CodeBlocks>

View File

@@ -0,0 +1,51 @@
## Identity Support for Memory Blocks
Memory blocks can now be associated with specific identities, allowing for better organization and retrieval of contextual information about various entities in your agent's knowledge base.
### Adding Blocks to an Identity
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
client.agents.identities.modify(
identity_id="IDENTITY_ID",
block_ids=["BLOCK_ID"],
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
await client.agents.identities.modify({
identity_id: "IDENTITY_ID",
block_ids: ["BLOCK_ID"],
});
```
</CodeBlocks>
### Querying Blocks by Identity
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
client.agents.blocks.list(
identity_id="IDENTITY_ID",
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
await client.agents.blocks.list({
identity_id: "IDENTITY_ID",
});
```
</CodeBlocks>

View File

@@ -0,0 +1,3 @@
## MCP Now Supported
We've added MCP support in the latest SDK version. For full documentation on how to enable MCP with Letta, visit [our MCP guide](/guides/mcp/setup).

View File

@@ -0,0 +1,24 @@
## New `include_relationships` Parameter for List Agents API
You can now leverage a more customized, lightweight response from the list agents API by setting the `include_relationships` parameter to which fields you'd like to fetch in the response.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
agents = client.agents.list(
include_relationships=["identities", "blocks", "tools"],
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agents = await client.agents.list({
include_relationships: ["identities", "blocks", "tools"],
});
```
</CodeBlocks>

View File

@@ -0,0 +1,28 @@
## Message `content` field extended to include Multi-modal content parts
The `content` field on `UserMessage` and `AssistantMessage` objects returned by our Messages endpoints has been extended to support multi-modal content parts, in anticipation of allowing you to send and receive messages with text, images, and other media.
### Before:
```json
{
"id": "message-dea2ceab-0863-44ea-86dc-70cf02c05946",
"date": "2025-01-28T01:18:18+00:00",
"message_type": "user_message",
"content": "Hello, how are you?"
}
```
### After:
```json
{
"id": "message-dea2ceab-0863-44ea-86dc-70cf02c05946",
"date": "2025-01-28T01:18:18+00:00",
"message_type": "user_message",
"content": [
{
"type": "text",
"text": "Hello, how are you?"
}
]
}
```

View File

@@ -0,0 +1,3 @@
## `Embedding` model info now specified directly on Source
The `Source` object returned by our Sources endpoints now stores embedding related fields, to specify the embedding model and chunk size used to generate the source.

View File

@@ -0,0 +1,39 @@
## Max invocation count tool rule
A new tool rule has been introduced for configuring a maximum invocation count per tool for each step.
<CodeBlocks>
```python title="python"
from letta_client import Letta, MaxCountPerStepToolRule
client = Letta(
token="YOUR_API_KEY",
)
client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
tool_rules=[
MaxCountPerStepToolRule(
tool_name="manage_inventory",
max_count_limit=10
)
]
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agent = await client.agents.create({
model: "openai/gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
tool_rules: [
{
type: "max_count_per_step",
tool_name: "manage_inventory",
max_count_limit: 10
}
]
});
```
</CodeBlocks>

View File

@@ -0,0 +1,11 @@
## Output messages added to Steps API
The `Step` object returned by our Steps endpoints now includes a `steps_messages` field, which contains a list of messages generated by the step.
## Order parameter added to List Agents and List Passages APIs
The `List Agents` and `List Passages` endpoints now support an `ascending` parameter to sort the results based on creation timestamp.
## Filter parameters added to List Passages API
The `List Passages` endpoint now supports filter parameters to filter the results including `after`, `before`, and `search` for filtering by text.

View File

@@ -0,0 +1,30 @@
## New fields to support reasoning models
The `LlmConfig` object now includes an `enable_reasoner` field, which enables toggling on thinking steps for reasoning models like Sonnet 3.7. This change also includes support for specifying this along with `max_reasoning_tokens` in the agent creation API.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
agent = client.agents.create(
model="claude/sonnet-3-7",
enable_reasoner=True,
max_reasoning_tokens=10000,
max_tokens=100000
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agent = await client.agents.create({
model: "claude/sonnet-3-7",
enable_reasoner: true,
max_reasoning_tokens: 10000,
max_tokens: 100000
});
```
</CodeBlocks>

View File

@@ -0,0 +1,28 @@
## Modify Agent API now supports `model` and `embedding` fields
The `Modify Agent` API now supports `model` and `embedding` fields to update the model and embedding used by the agent using the handles rather than specifying the entire configs.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
client.agents.modify(
agent_id="AGENT_ID",
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
await client.agents.modify({
agent_id: "AGENT_ID",
model: "openai/gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
});
```
</CodeBlocks>

View File

@@ -0,0 +1,26 @@
## New `strip_messages` field in Import Agent API
The `Import Agent` API now supports a new `strip_messages` field to remove messages from the agent's conversation history when importing a serialized agent file.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
client.agents.import_agent_serialized(
file=open("/path/to/agent/file.af", "rb"),
strip_messages=True,
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
import * as fs from 'fs';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
await client.agents.importAgentSerialized({
file: fs.createReadStream("/path/to/your/file"),
strip_messages: true,
});
```
</CodeBlocks>

View File

@@ -0,0 +1,41 @@
## Add new `otid` field to Message API
The `Message` object returned by our Messages endpoints now includes an offline threading id field, a unique identifier set at creation time, which can be used by the client to deduplicate messages.
### Example:
<CodeBlocks>
```python title="python"
from letta_client import Letta, MessageCreate
import uuid
client = Letta(
token="YOUR_API_KEY",
)
messages = client.agents.messages.create(
agent_id="AGENT_ID",
messages=[
MessageCreate(
role="user",
content="Hello, how are you?",
otid=uuid.uuid4(),
)
]
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
import { v4 as uuid } from 'uuid';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const messages = await client.agents.messages.create({
agent_id: "AGENT_ID",
messages: [
{
role: "user",
content: "Hello, how are you?",
otid: uuid.v4(),
},
],
});
```
</CodeBlocks>

View File

@@ -0,0 +1,24 @@
## Runs API can now be filtered by Agent ID
The Runs API now supports filtering by `agent_id` to retrieve all runs and all active runs associated with a specific agent.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
runs = client.runs.list_active_runs(
agent_id="AGENT_ID",
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const runs = await client.runs.listActiveRuns({
agent_id: "AGENT_ID",
});
```
</CodeBlocks>

View File

@@ -0,0 +1,39 @@
## New Parent Tool Rule
A new tool rule has been introduced for configuring a parent tool rule, which only allows a target tool to be called after a parent tool has been run.
<CodeBlocks>
```python title="python"
from letta_client import Letta, ParentToolRule
client = Letta(
token="YOUR_API_KEY",
)
agent = client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-3-small",
tool_rules=[
ParentToolRule(
tool_name="parent_tool",
children=["child_tool"]
)
]
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agent = await client.agents.create({
model: "openai/gpt-4o-mini",
embedding: "openai/text-embedding-3-small",
tool_rules: [
{
type: "parent",
tool_name: "parent_tool",
children: ["child_tool"]
}
]
});
```
</CodeBlocks>

View File

@@ -0,0 +1,48 @@
## New Upsert Properties API for Identities
The `Upsert Properties` API has been added to the Identities endpoint, allowing you to update or create properties for an identity.
<CodeBlocks>
```python title="python"
from letta_client import IdentityProperty, Letta
client = Letta(
token="YOUR_TOKEN",
)
client.identities.upsert_properties(
identity_id="IDENTITY_ID",
request=[
IdentityProperty(
key="name",
value="Caren",
type="string",
),
IdentityProperty(
key="email",
value="caren@example.com",
type="string",
)
],
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
await client.identities.upsertProperties({
identity_id: "IDENTITY_ID",
properties: [
{
key: "name",
value: "Caren",
type: "string",
},
{
key: "email",
value: "caren@example.com",
type: "string",
},
],
});
```
</CodeBlocks>

View File

@@ -0,0 +1,42 @@
## New `reasoning_effort` field added to LLMConfig
The `reasoning_effort` field has been added to the `LLMConfig` object to control the amount of reasoning the model should perform, to support OpenAI's o1 and o3 reasoning models.
## New `sender_id` parameter added to Message model
The `Message` object now includes a `sender_id` field — the ID of the message's sender, which can be either an identity ID or an agent ID. The `sender_id` is expected to be passed in at message creation time.
<CodeBlocks>
```python title="python"
from letta_client import Letta, MessageCreate
client = Letta(
token="YOUR_API_KEY",
)
messages = client.agents.messages.create(
agent_id="AGENT_ID",
messages=[
MessageCreate(
role="user",
content="Hello, how are you?",
sender_id="IDENTITY_ID",
)
]
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const messages = await client.agents.messages.create({
agent_id: "AGENT_ID",
messages: [
{
role: "user",
content: "Hello, how are you?",
sender_id: "IDENTITY_ID",
},
],
});
```
</CodeBlocks>

View File

@@ -0,0 +1,24 @@
## New List Agent Groups API added
The `List Agent Groups` API has been added to the Agents endpoint, allowing you to retrieve all multi-agent groups associated with a specific agent.
<CodeBlocks>
```python title="python"
from letta_client import Letta
client = Letta(
token="YOUR_API_KEY",
)
agent_groups = client.agents.list_agent_groups(
agent_id="AGENT_ID",
)
```
```typescript title="node.js"
import { LettaClient } from '@letta-ai/letta-client';
const client = new LettaClient({
token: "YOUR_API_KEY",
});
const agentGroups = await client.agents.listAgentGroups({
agent_id: "AGENT_ID",
});
```
</CodeBlocks>

View File

@@ -0,0 +1,5 @@
## New Batch message creation API
A series of new `Batch` endpoints has been introduced to support batch message creation, allowing you to perform multiple LLM requests in a single API call. These APIs leverage provider batch APIs under the hood, which can be more cost-effective than making multiple API calls.
New endpoints can be found here: [Batch Messages](https://docs.letta.com/api-reference/messages/batch)

View File

@@ -0,0 +1,7 @@
## New Projects Endpoint
<Note> These APIs are only available for Letta Cloud. </Note>
A new `Projects` endpoint has been added to the API, allowing you to manage projects and their associated templates.
The new endpoints can be found here: [Projects](https://docs.letta.com/api-reference/projects)

View File

@@ -0,0 +1,31 @@
## SDK Method Name Changes
In an effort to keep our SDK method names consistent with our conventions, we have renamed the following methods:
### Before and After
| SDK Method Name | Before | After |
| --- | --- | --- |
| List Tags | `client.tags.list_tags` | `client.tags.list` |
| Export Agent | `client.agents.export_agent_serialized` | `client.agents.export` |
| Import Agent | `client.agents.import_agent_serialized` | `client.agents.import` |
| Modify Agent Passage | `client.agents.modify_passage` | `client.agents.passages.modify` |
| Reset Agent Messages | `client.agents.reset_messages` | `client.agents.messages.reset` |
| List Agent Groups | `client.agents.list_agent_groups` | `client.agents.groups.list` |
| Reset Group Messages | `client.groups.reset_messages` | `client.groups.messages.reset` |
| Upsert Identity Properties | `client.identities.upsert_identity_properties` | `client.identities.properties.upsert` |
| Retrieve Source by Name | `client.sources.get_by_name` | `client.sources.retrieve_by_name` |
| List Models | `client.models.list_llms` | `client.models.list` |
| List Embeddings | `client.models.list_embedding_models` | `client.embeddings.list` |
| List Agents for Block | `client.blocks.list_agents_for_block` | `client.blocks.agents.list` |
| List Providers | `client.providers.list_providers` | `client.providers.list` |
| Create Provider | `client.providers.create_providers` | `client.providers.create` |
| Modify Provider | `client.providers.modify_providers` | `client.providers.modify` |
| Delete Provider | `client.providers.delete_providers` | `client.providers.delete` |
| List Runs | `client.runs.list_runs` | `client.runs.list` |
| List Active Runs | `client.runs.list_active_runs` | `client.runs.list_active` |
| Retrieve Run | `client.runs.retrieve_run` | `client.runs.retrieve` |
| Delete Run | `client.runs.delete_run` | `client.runs.delete` |
| List Run Messages | `client.runs.list_run_messages` | `client.runs.messages.list` |
| List Run Steps | `client.runs.list_run_steps` | `client.runs.steps.list` |
| Retrieve Run Usage | `client.runs.retrieve_run_usage` | `client.runs.usage.retrieve` |

688
fern/docs.yml Normal file
View File

@@ -0,0 +1,688 @@
instances:
- url: https://letta.docs.buildwithfern.com
custom-domain: https://docs.letta.com
title: Letta
experimental:
openapi-parser-v3: true
tabs:
docs:
display-name: Documentation
slug: documentation
ade:
display-name: ADE Guide
slug: ade
cloud:
display-name: Letta Cloud
skip-slug: true
selfhosted:
display-name: Self-Hosting
skip-slug: true
ref:
display-name: API Reference
skip-slug: true
cookbooks:
display-name: Cookbooks
icon: fa-sharp fa-light fa-books
skip-slug: true
github:
display-name: GitHub
icon: fa-brands fa-github
href: https://github.com/letta-ai/letta
discord:
display-name: Discord
icon: fa-brands fa-discord
href: https://discord.gg/letta
community:
display-name: Developer Community
icon: fa-sharp fa-light fa-user-astronaut
skip-slug: true
install:
display-name: Download
icon: fa-sharp fa-light fa-download
skip-slug: true
showcase:
display-name: Examples
skip-slug: true
leaderboard:
display-name: Leaderboard
skip-slug: true
landing-page:
page: home
path: pages/index.mdx
navigation:
- tab: docs
layout:
- link: Chat on Discord
icon: fa-brands fa-discord
href: https://discord.gg/letta
- link: Developer Forum
icon: fa-sharp fa-light fa-comments
href: https://forum.letta.com
- link: DeepLearning.AI Course
icon: fa-sharp fa-light fa-building-columns
href: https://www.deeplearning.ai/short-courses/llms-as-operating-systems-agent-memory/?utm_campaign=memgpt-launch&utm_content=331638345&utm_medium=social&utm_source=docs&hss_channel=tw-992153930095251456
- section: Get Started
contents:
- page: Letta Overview
path: pages/getting-started/letta_platform.mdx
- page: Quickstart
path: pages/getting-started/quickstart.mdx
- page: Prompts for Vibecoding
path: pages/getting-started/prompts.mdx
#- section: Supported Frameworks
# contents:
# - page: TypeScript (Node.js)
# path: pages/getting-started/ade.mdx
# - page: Python
# path: pages/getting-started/ade.mdx
# - page: Vercel AI SDK
# path: pages/frameworks/vercel.mdx
# - page: React
# path: pages/frameworks/react.mdx
# - page: Next.js
# path: pages/frameworks/next.mdx
# - page: Flask
# path: pages/frameworks/flask.mdx
# - page: Mastra
# path: pages/frameworks/mastra.mdx
- section: Stateful Agents
contents:
- page: Overview
path: pages/agents/overview.mdx
- section: Agent Architectures
path: pages/agents/architectures.mdx
contents:
- page: MemGPT Agents
path: pages/agents/memgpt_agents.mdx
- page: Sleep-time Agents
path: pages/agents/sleep_time_agents.mdx
- page: Low-latency (voice) Agents
path: pages/agents/low_latency_agents.mdx
- page: ReAct Agents
path: pages/agents/react_agents.mdx
- page: Workflows
path: pages/agents/workflows.mdx
- page: Stateful Workflows
path: pages/agents/stateful_workflows.mdx
- page: Context Hierarchy
path: pages/agents/context_hierarchy.mdx
- page: Heartbeats
path: pages/agents/heartbeats.mdx
- section: Memory
path: pages/agents/memory.mdx
contents:
- page: Memory Blocks
path: pages/agents/memory_blocks.mdx
- page: Agentic Context Engineering
path: pages/agents/context_engineering.mdx
- page: Filesystem
path: pages/agents/filesystem.mdx
- page: Streaming Responses
path: pages/agents/streaming.mdx
- page: Long-Running Executions
path: pages/agents/long_running.mdx
- page: JSON Mode & Structured Output
path: pages/agents/json_mode.mdx
- page: Human-in-the-Loop
path: pages/agents/human_in_the_loop.mdx
- page: Multi-Modal
path: pages/agents/multimodal.mdx
- section: Multi-Agent
path: pages/agents/multiagent.mdx
contents:
- page: Custom Multi-Agent Tools
path: pages/agents/multiagent_custom.mdx
- page: Multi-Agent Shared Memory
path: pages/agents/multiagent_memory.mdx
- page: Groups
path: pages/agents/groups.mdx
- page: Multi-User (Identities)
path: pages/agents/multiuser.mdx
- page: Agent File (.af)
path: pages/agents/agentfile.mdx
- page: Scheduling
path: pages/agents/scheduling.mdx
- section: Voice Agents
path: pages/voice/voice.mdx
contents:
- page: Connecting to LiveKit Agents
path: pages/voice/voice_livekit.mdx
- page: Connecting to Vapi
path: pages/voice/voice_vapi.mdx
- section: Tool Use
contents:
- page: Overview
path: pages/agents/tools.mdx
- page: Pre-built Tools
path: pages/agents/prebuilt_tools.mdx
- page: Custom Tools
path: pages/agents/custom_tools.mdx
- page: Tool Rules
path: pages/agents/tool_rules.mdx
- page: Tool Variables
path: pages/agents/tool_variables.mdx
- page: Composio Integration
path: pages/agents/composio.mdx
hidden: true
- section: Model Context Protocol
path: pages/mcp/overview.mdx
contents:
- page: Connecting Letta to MCP
path: pages/mcp/setup.mdx
- page: Remote (SSE/HTTP) Servers
path: pages/mcp/sse.mdx
- page: Local (stdio) Servers
path: pages/mcp/stdio.mdx
#- section: Tool Execution
# contents:
# - page: Overview
# path: pages/tool_execution/overview.mdx
# - section: Model Context Protocol
# contents:
# - page: What is MCP?
# path: pages/mcp/overview.mdx
# - section: Connecting Letta to MCP
# path: pages/mcp/setup.mdx
# contents:
# - page: Remote (SSE/HTTP) Servers
# path: pages/mcp/sse.mdx
# - page: Local (stdio) Servers
# path: pages/mcp/stdio.mdx
#- section: Deploying a Letta Server
# contents:
# - page: Letta Docker Image
# path: pages/server/docker.mdx
# - section: Connecting Model Providers
# contents:
# - page: OpenAI
# path: pages/models/openai.mdx
# - page: OpenAI proxy
# path: pages/models/openai_proxy.mdx
# - page: Anthropic
# path: pages/models/anthropic.mdx
# - page: DeepSeek
# path: pages/models/deepseek.mdx
# - page: AWS Bedrock
# path: pages/models/aws_bedrock.mdx
# - page: Groq
# path: pages/models/groq.mdx
# - page: xAI (Grok)
# path: pages/models/xai.mdx
# - page: Together
# path: pages/models/together.mdx
# - page: Google AI / Gemini
# path: pages/models/google.mdx
# - page: Google Vertex
# path: pages/models/google_vertex.mdx
# - page: Azure OpenAI
# path: pages/models/azure.mdx
# - page: Ollama
# path: pages/models/ollama.mdx
# - page: LM Studio
# path: pages/models/lmstudio.mdx
# - page: vLLM
# path: pages/models/vllm.mdx
# - section: Remote Hosting
# path: pages/deployment/remote.mdx
# contents:
# - page: Deploy on Railway
# path: pages/deployment/railway.mdx
# - section: Alternate Install Methods
# contents:
# - page: Using pip
# path: pages/server/pip.mdx
# - page: Installing from Source
# path: pages/server/source.mdx
#- section: Agent Templates
# contents:
# - page: Introduction to Templates
# path: pages/cloud/templates.mdx
# - page: Memory Variables
# path: pages/cloud/variables.mdx
# - page: Versioning
# path: pages/cloud/versions.mdx
- section: Key Concepts
contents:
- page: Letta concepts
path: pages/concepts/letta.mdx
- page: MemGPT concepts
path: pages/concepts/memgpt.mdx
- section: Additional Resources
contents:
- page: Letta Desktop Troubleshooting
path: pages/desktop/troubleshooting.mdx
- page: ADE Troubleshooting
path: pages/agent-development-environment/troubleshooting.mdx
- tab: ade
layout:
- link: Chat on Discord
icon: fa-brands fa-discord
href: https://discord.gg/letta
- link: Developer Forum
icon: fa-sharp fa-light fa-comments
href: https://forum.letta.com
- link: DeepLearning.AI Course
icon: fa-sharp fa-light fa-building-columns
href: https://www.deeplearning.ai/short-courses/llms-as-operating-systems-agent-memory/?utm_campaign=memgpt-launch&utm_content=331638345&utm_medium=social&utm_source=docs&hss_channel=tw-992153930095251456
- section: ADE Guide
contents:
- page: ADE Overview
path: pages/ade-guide/overview.mdx
- section: Getting Started
path: pages/ade-guide/setup.mdx
contents:
- page: Access from your browser
icon: fa-sharp fa-light fa-browser
path: pages/ade-guide/web.mdx
- page: Download Letta Desktop
icon: fa-sharp fa-light fa-download
path: pages/desktop/install.mdx
- section: ADE Components
contents:
- page: Agent Simulator
icon: fa-sharp fa-light fa-alien-8bit
path: pages/ade-guide/simulator.mdx
- page: Context Window Viewer
icon: fa-sharp fa-light fa-eye
path: pages/ade-guide/context_window_viewer.mdx
- page: Core Memory
icon: fa-sharp fa-light fa-brain
path: pages/ade-guide/core_memory.mdx
- page: Archival Memory
icon: fa-sharp fa-light fa-box-archive
path: pages/ade-guide/archival_memory.mdx
- page: Data Sources
icon: fa-sharp fa-light fa-database
path: pages/ade-guide/data_sources.mdx
- page: Tools
icon: fa-sharp fa-light fa-wrench
path: pages/ade-guide/tools.mdx
- page: Settings
icon: fa-sharp fa-light fa-gear
path: pages/ade-guide/settings.mdx
- tab: selfhosted
layout:
- link: Chat on Discord
icon: fa-brands fa-discord
href: https://discord.gg/letta
- link: Developer Forum
icon: fa-sharp fa-light fa-comments
href: https://forum.letta.com
- link: DeepLearning.AI Course
icon: fa-sharp fa-light fa-building-columns
href: https://www.deeplearning.ai/short-courses/llms-as-operating-systems-agent-memory/?utm_campaign=memgpt-launch&utm_content=331638345&utm_medium=social&utm_source=docs&hss_channel=tw-992153930095251456
#- page: Install Letta Desktop
# icon: fa-sharp fa-light fa-download
# path: pages/install.mdx
- section: Self-Hosting
contents:
- page: Overview
path: pages/selfhosting/overview.mdx
- page: Tool Execution
path: pages/tool_execution/local_tool_execution.mdx
- page: Tracing & Telemetry
path: pages/deployment/telemetry.mdx
- section: Deployment
path: pages/deployment/remote.mdx
contents:
- page: Railway
path: pages/deployment/railway.mdx
#- page: Deploying with Docker
# icon: fa-brands fa-docker
# path: pages/server/docker.mdx
#- page: Install Letta via pip
# icon: fa-brands fa-python
# path: pages/server/pip.mdx
- section: Connecting Model Providers
contents:
- page: Supported Models
path: pages/selfhosting/supported-models.mdx
- page: OpenAI
path: pages/models/openai.mdx
- page: Anthropic
path: pages/models/anthropic.mdx
- page: Gemini (Google AI)
path: pages/models/google.mdx
- page: LM Studio
path: pages/models/lmstudio.mdx
- section: See More Providers
icon: fa-sharp fa-light fa-caret-down
contents:
- page: OpenAI proxy
path: pages/models/openai_proxy.mdx
- page: DeepSeek
path: pages/models/deepseek.mdx
- page: AWS Bedrock
path: pages/models/aws_bedrock.mdx
- page: Groq
path: pages/models/groq.mdx
- page: xAI (Grok)
path: pages/models/xai.mdx
- page: Together
path: pages/models/together.mdx
- page: Google Vertex
path: pages/models/google_vertex.mdx
- page: Azure OpenAI
path: pages/models/azure.mdx
- page: Ollama
path: pages/models/ollama.mdx
- page: vLLM
path: pages/models/vllm.mdx
#- section: Remote Deployments
# contents:
# - page: Overview
# path: pages/deployment/remote.mdx
# - page: Example - Deploy on Railway
# path: pages/deployment/railway.mdx
- section: Advanced
contents:
#- page: Install with pip
# path: pages/server/pip.mdx
- page: Database Configuration
path: pages/selfhosting/postgres.mdx
- page: Performance
path: pages/selfhosting/performance.mdx
- page: pgadmin
path: pages/selfhosting/pgadmin.mdx
- page: Installing from Source
path: pages/server/source.mdx
- tab: cloud
layout:
- link: Chat on Discord
icon: fa-brands fa-discord
href: https://discord.gg/letta
- link: Developer Forum
icon: fa-sharp fa-light fa-comments
href: https://forum.letta.com
- link: DeepLearning.AI Course
icon: fa-sharp fa-light fa-building-columns
href: https://www.deeplearning.ai/short-courses/llms-as-operating-systems-agent-memory/?utm_campaign=memgpt-launch&utm_content=331638345&utm_medium=social&utm_source=docs&hss_channel=tw-992153930095251456
- section: Get started
contents:
- page: Overview
path: pages/cloud/overview.mdx
#- page: Quickstart
# path: pages/getting-started/quickstart_cloud.mdx
- page: Get a Letta Cloud API key
path: pages/cloud/api_key.mdx
- section: Account
contents:
- page: Plans & Pricing
path: pages/cloud/pricing.mdx
# - page: Available Models
# path: pages/cloud/models.mdx
- page: Custom API Keys
path: pages/cloud/api_keys.mdx
- page: Role-Based Access Control
path: pages/cloud/rbac.mdx
- section: Deploying Agents
contents:
- page: Agent Templates Overview
path: pages/cloud/templates.mdx
- page: Template Versioning
path: pages/cloud/versions.mdx
- page: Memory Variables
path: pages/cloud/variables.mdx
- page: Client-Side Access Tokens
path: pages/cloud/client-side-tokens.mdx
# - page: Deploying via the SDK
# path: pages/cloud/variables.mdx
# - page: Deploying via the ADE
# path: pages/cloud/versions.mdx
- section: Observability
contents:
- page: Overview
path: pages/cloud/observability.mdx
- page: Monitoring
path: pages/cloud/monitoring.mdx
- page: Responses & Tracing
path: pages/cloud/responses.mdx
- tab: ref
layout:
- link: Chat on Discord
icon: fa-brands fa-discord
href: https://discord.gg/letta
- link: Developer Forum
icon: fa-sharp fa-light fa-comments
href: https://forum.letta.com
- link: DeepLearning.AI Course
icon: fa-sharp fa-light fa-building-columns
href: https://www.deeplearning.ai/short-courses/llms-as-operating-systems-agent-memory/?utm_campaign=memgpt-launch&utm_content=331638345&utm_medium=social&utm_source=docs&hss_channel=tw-992153930095251456
- section: API Reference
contents:
- page: API and SDK Overview
path: pages/api/about.mdx
- changelog: ./changelog
title: Changelog
slug: changelog
- api: API Reference
display-errors: true
paginated: true
flattened: true
snippets:
typescript: "@letta-ai/letta-client"
python: letta-client
layout:
- agents
- tab: showcase
layout:
- link: Chat on Discord
icon: fa-brands fa-discord
href: https://discord.gg/letta
- link: Developer Forum
icon: fa-sharp fa-light fa-comments
href: https://forum.letta.com
- link: DeepLearning.AI Course
icon: fa-sharp fa-light fa-building-columns
href: https://www.deeplearning.ai/short-courses/llms-as-operating-systems-agent-memory/?utm_campaign=memgpt-launch&utm_content=331638345&utm_medium=social&utm_source=docs&hss_channel=tw-992153930095251456
- section: Examples
contents:
- page: Overview
path: pages/cookbooks_simple.mdx
- section: Multi-Agent
contents:
- page: Async Multi-Agent
path: pages/tutorials/multiagent_async.mdx
- tab: leaderboard
layout:
- link: Chat on Discord
icon: fa-brands fa-discord
href: https://discord.gg/letta
- link: Developer Forum
icon: fa-sharp fa-light fa-comments
href: https://forum.letta.com
- link: DeepLearning.AI Course
icon: fa-sharp fa-light fa-building-columns
href: https://www.deeplearning.ai/short-courses/llms-as-operating-systems-agent-memory/?utm_campaign=memgpt-launch&utm_content=331638345&utm_medium=social&utm_source=docs&hss_channel=tw-992153930095251456
- section: Letta Leaderboard
contents:
- page: Overview
path: pages/leaderboard/overview.mdx
# - page: Benchmark Information
# path: pages/leaderboard/benchmarks.mdx
- page: Contributing Results
path: pages/leaderboard/contributing.mdx
# - tab: cookbooks
# layout:
# - section: Cookbooks
# path: pages/cookbooks.mdx
# contents:
# - section: Multi-Agent
# contents:
# - page: Async Multi-Agent
# path: pages/tutorials/multiagent_async.mdx
# - tab: community
# layout:
# - page: Developer Community
# path: pages/community.mdx
# Theme colors for the docs site; each color has a light- and dark-mode value.
# NOTE(review): some light values carry an 8-digit hex (trailing alpha "ff") and
# some do not -- presumably both are accepted; confirm against the Fern docs.
colors:
  accent-primary:
    light: '#0707ac'
    dark: '#FF5533'
  background:
    light: '#ffffffff'
    dark: '#0d0d0d'
  card-background:
    light: '#f6f6f6ff'
    dark: '#151515'
  header-background:
    light: '#fbfbfbff'
    dark: '#000000ff'
  border:
    light: '#eef0f2ff'
    dark: '#202020'
# Custom stylesheets applied on top of the default theme.
css:
  - assets/styles.css
  - assets/leaderboard.css
# Custom scripts; the leaderboard script is deferred until the page is idle.
js:
  - path: assets/leaderboard.js
    strategy: lazyOnload
    # strategy: afterInteractive
favicon: assets/favicon.png
# Site logo links back to the docs root and swaps asset per color scheme.
logo:
  href: /
  light: assets/logo-light.svg
  dark: assets/logo-dark.svg
navbar-links:
  - type: github
    value: https://github.com/letta-ai/letta
  # Filled call-to-action button linking to the hosted ADE.
  - type: filled
    text: Launch ADE
    href: https://app.letta.com
    rounded: false
layout:
  page-width: 1504px
  tabs-placement: header
  searchbar-placement: header
# Custom fonts: body (Manrope), headings (Roobert), code (Fira Code).
# "weight: 500 900" declares a variable-weight range, not a single weight.
typography:
  bodyFont:
    name: ManropeRegularBody
    paths:
      - path: assets/fonts/manrope/Manrope-Regular.ttf
        weight: 400
        style: normal
      - path: assets/fonts/manrope/Manrope-Medium.ttf
        weight: 500 900
        style: normal
  headingsFont:
    name: RoobertMediumHeading
    path: assets/fonts/roobert/RoobertMedium.woff2
  codeFont:
    name: FiraCode
    paths:
      - path: assets/fonts/fira-code/FiraCode-Regular.ttf
        weight: 400
        style: normal
      - path: assets/fonts/fira-code/FiraCode-Medium.ttf
        weight: 500 900
        style: normal
# URL redirects for moved/renamed pages.
# Keep exactly one entry per source path: a duplicate "/quickstart/cloud"
# entry (-> "/quickstart") was removed because it conflicted with the
# earlier "/quickstart/cloud" -> "/cloud/quickstart" rule below.
redirects:
  - source: "/install"
    destination: "/guides/ade/desktop"
  - source: "/desktop"
    destination: "/guides/ade/desktop"
  - source: "/quickstart/desktop"
    destination: "/guides/ade/desktop"
  - source: "/quickstart/docker"
    destination: "/guides/selfhosting"
  - source: "/guides/server/pip"
    destination: "/guides/selfhosting"
  # NOTE: "/cloud/quickstart" itself redirects to "/guides/cloud/quickstart"
  # below, so this entry produces a two-hop redirect chain.
  - source: "/quickstart/cloud"
    destination: "/cloud/quickstart"
  - source: "/guides/server/docker"
    destination: "/guides/selfhosting"
  - source: "/agent-development-environment"
    destination: "/guides/ade/overview"
  - source: "/guides/ade/usage"
    destination: "/guides/ade/overview"
  - source: "/guides/agents/mcp"
    destination: "/guides/mcp/overview"
  - source: "/guides/mcp/sse"
    destination: "/guides/mcp/remote"
  - source: "/guides/mcp/stdio"
    destination: "/guides/mcp/local"
  - source: "/guides/server/quickstart"
    destination: "/quickstart"
  - source: "/agent-development-environment/troubleshooting"
    destination: "/guides/ade/troubleshooting"
  - source: "/models/openai"
    destination: "/guides/server/providers/openai"
  - source: "/models/openai_proxy"
    destination: "/guides/server/providers/openai-proxy"
  - source: "/models/anthropic"
    destination: "/guides/server/providers/anthropic"
  - source: "/models/aws_bedrock"
    destination: "/guides/server/providers/aws_bedrock"
  - source: "/models/groq"
    destination: "/guides/server/providers/groq"
  - source: "/models/together"
    destination: "/guides/server/providers/together"
  - source: "/models/google"
    destination: "/guides/server/providers/google"
  - source: "/models/google_vertex"
    destination: "/guides/server/providers/google_vertex"
  - source: "/models/deepseek"
    destination: "/guides/server/providers/deepseek"
  - source: "/models/ollama"
    destination: "/guides/server/providers/ollama"
  - source: "/models/vllm"
    destination: "/guides/server/providers/vllm"
  - source: "/models/azure"
    destination: "/guides/server/providers/azure"
  - source: "/server/docker"
    destination: "/guides/server/docker"
  - source: "/server/pip"
    destination: "/guides/server/pip"
  - source: "/agents/tools"
    destination: "/guides/agents/tools"
  - source: "/concepts"
    destination: "/concepts/letta"
  # NOTE: "/letta-platform" redirects to "/overview" below, so this also chains.
  - source: "/introduction"
    destination: "/letta-platform"
  - source: "/advanced/memory_management"
    destination: "/guides/agents/memory"
  - source: "/changelog"
    destination: "/api-reference/changelog"
  - source: "/api-changelog"
    destination: "/api-reference/changelog"
  - source: "/guides/cloud"
    destination: "/guides/cloud/overview"
  - source: "/guides/ade"
    destination: "/guides/ade/overview"
  - source: "/cloud/quickstart"
    destination: "/guides/cloud/quickstart"
  - source: "/letta-platform"
    destination: "/overview"
  - source: "/guides/agents/sleep-time-agents"
    destination: "/guides/agents/architectures/sleeptime"
  - source: "/guides/agents/sources"
    destination: "/guides/agents/filesystem"
  - source: "/guides/desktop/install"
    destination: "/guides/ade/desktop"
  - source: "/api-reference/agents/cancel-agent-run"
    destination: "/api-reference/agents/messages/cancel"
  - source: "/api-reference/messages/cancel-batch-run"
    destination: "/api-reference/batches/cancel"

View File

@@ -0,0 +1,60 @@
"""Create agents against several model providers.

Lists the LLM and embedding models known to a local Letta server, then
creates one agent per provider.  Model/embedding handles are
provider-qualified strings of the form ``provider/model-name``.
"""
from letta_client import Letta

client = Letta(base_url="http://localhost:8283")

# Show every LLM the server knows about.
for llm in client.models.list_llms():
    print(f"Provider {llm.model_endpoint_type} model {llm.model}: {llm.handle}")

# Show every embedding model as well.
for emb in client.models.list_embedding_models():
    print(f"Provider {emb.handle}")

# One (model, embedding) handle pair per provider.  Anthropic and Groq do
# not offer embedding models, so those agents borrow OpenAI's.
provider_configs = [
    ("openai/gpt-4o-mini", "openai/text-embedding-3-small"),
    ("azure/gpt-4o-mini", "azure/text-embedding-3-small"),
    ("anthropic/claude-3-5-sonnet-20241022", "openai/text-embedding-3-small"),
    ("groq/llama-3.3-70b-versatile", "openai/text-embedding-3-small"),
    ("ollama/thewindmom/hermes-3-llama-3.1-8b:latest", "ollama/mxbai-embed-large:latest"),
]

# Create one agent per provider with the same optional tuning knobs.
agents = [
    client.agents.create(
        model=model_handle,
        embedding=embedding_handle,
        # optional configuration
        context_window_limit=16000,
        embedding_chunk_size=300,
    )
    for model_handle, embedding_handle in provider_configs
]

View File

@@ -0,0 +1,30 @@
"""
Example of using composio tools in Letta
Make sure you set `COMPOSIO_API_KEY` environment variable or run `composio login` to authenticate with Composio.
"""
from composio import Action
from letta_client import Letta
client = Letta(base_url="http://localhost:8283")
# add a composio tool
tool = client.tools.add_composio_tool(composio_action_name=Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER.name)
# create an agent with the tool
agent = client.agents.create(
name="file_editing_agent",
memory_blocks=[{"label": "persona", "value": "I am a helpful assistant"}],
model="anthropic/claude-3-5-sonnet-20241022",
embedding="openai/text-embedding-3-small",
tool_ids=[tool.id],
)
print("Agent tools", [tool.name for tool in agent.tools])
# message the agent
response = client.agents.messages.create(
agent_id=agent.id, messages=[{"role": "user", "content": "Star the github repo `letta` by `letta-ai`"}]
)
for message in response.messages:
print(message)

View File

@@ -0,0 +1,56 @@
"""Upload a file into a Letta source and attach the source to an agent.

Recreates a source named ``my_source``, uploads a small text file into it,
waits for the ingestion job to finish, then attaches the source to a
freshly created agent.
"""
import time

from letta_client import Letta

client = Letta(base_url="http://localhost:8283")

# get available embedding models
embedding_configs = client.models.list_embedding_models()

# delete any pre-existing source with the same name so the demo is repeatable
# (the original wrapped this in a redundant `len(...) > 0` check)
for source in client.sources.list():
    if source.name == "my_source":
        client.sources.delete(source.id)

# create a source
# TODO: pass in embedding
source = client.sources.create(name="my_source", embedding_config=embedding_configs[0])

# list sources
sources = client.sources.list()

# write a dummy file
with open("dummy.txt", "w") as f:
    f.write("Remember that the user is a redhead")

# upload the file into the source; use a context manager so the handle is
# closed after the request (the original leaked the file descriptor)
with open("dummy.txt", "rb") as upload:
    job = client.sources.files.upload(source_id=source.id, file=upload)

# poll once per second until the ingestion job completes (or fails)
while True:
    job = client.jobs.retrieve(job.id)
    if job.status == "completed":
        break
    elif job.status == "failed":
        raise ValueError(f"Job failed: {job.metadata}")
    print(f"Job status: {job.status}")
    time.sleep(1)

# list files in the source
files = client.sources.files.list(source_id=source.id)
print(f"Files in source: {files}")

# list passages in the source
passages = client.sources.passages.list(source_id=source.id)
print(f"Passages in source: {passages}")

# attach the source to an agent
agent = client.agents.create(
    name="my_agent",
    memory_blocks=[],
    model="anthropic/claude-3-5-sonnet-20241022",
    embedding=embedding_configs[0].handle,
    tags=["worker"],
)
client.agents.sources.attach(agent_id=agent.id, source_id=source.id)

44
fern/examples/memory.py Normal file
View File

@@ -0,0 +1,44 @@
"""Memory-block examples: per-agent inline blocks and a block shared by ID."""
from letta_client import Letta

client = Letta(base_url="http://localhost:8283")

# an agent with its own inline memory blocks (dicts, created with the agent)
agent = client.agents.create(
    name="memory_agent",
    memory_blocks=[
        {"label": "persona", "value": "I am a memory agent"},
        {"label": "human", "value": "Name: Bob", "limit": 10000},
    ],
    model="anthropic/claude-3-5-sonnet-20241022",
    embedding="openai/text-embedding-3-small",
    tags=["worker"],
)

# create a persisted block, which can be attached to agents
block = client.blocks.create(
    label="organization",
    value="Organization: Letta",
    limit=4000,
)

# create an agent that shares the persisted block
# fix: existing blocks are attached by ID via `block_ids`; `memory_blocks`
# expects inline block definitions (dicts), not block IDs
shared_block_agent = client.agents.create(
    name="shared_block_agent",
    block_ids=[block.id],
    model="anthropic/claude-3-5-sonnet-20241022",
    embedding="openai/text-embedding-3-small",
    tags=["worker"],
)

# list the agent's blocks
blocks = client.agents.core_memory.list_blocks(shared_block_agent.id)
for block in blocks:
    print(block)

# update the block (via ID) -- note `block` is the last item of the loop above
block = client.blocks.modify(block.id, limit=10000)

# update the block (via label)
block = client.agents.core_memory.modify_block(
    agent_id=shared_block_agent.id, block_label="organization", value="Organization: Letta", limit=10000
)

View File

@@ -0,0 +1,53 @@
"""Supervisor/worker multi-agent example.

A supervisor agent fans a question out to every agent tagged ``worker``
(via the ``send_message_to_agents_matching_all_tags`` tool) and reports
the answers back to the user.  Agents are cleaned up at the end.
"""
from letta_client import Letta

client = Letta(base_url="http://localhost:8283")

try:
    # create a supervisor agent
    supervisor_agent = client.agents.create(
        name="supervisor_agent",
        memory_blocks=[
            {"label": "persona", "value": "I am the supervisor, and I can communicate with worker agents with the tag `worker`"}
        ],
        model="anthropic/claude-3-5-sonnet-20241022",
        embedding="openai/text-embedding-3-small",
        tags=["supervisor"],
        tools=["send_message_to_agents_matching_all_tags"],
    )
    print(f"Created agent {supervisor_agent.name} with ID {supervisor_agent.id}")

    def get_name() -> str:
        """Get the name of the worker agent."""
        return "Bob"

    # register the function as a tool the worker can call
    tool = client.tools.upsert_from_function(func=get_name)
    print(f"Created tool {tool.name} with ID {tool.id}")

    # create a worker agent that exposes the get_name tool
    worker_agent = client.agents.create(
        name="worker_agent",
        memory_blocks=[{"label": "persona", "value": f"I am the worker, my supervisor agent has ID {supervisor_agent.id}"}],
        model="anthropic/claude-3-5-sonnet-20241022",
        embedding="openai/text-embedding-3-small",
        tool_ids=[tool.id],
        tags=["worker"],
        tools=["send_message_to_agents_matching_all_tags"],
    )
    print(f"Created agent {worker_agent.name} with ID {worker_agent.id}")

    # fix: the fan-out request must go to the SUPERVISOR; the original sent it
    # to the worker, which would have asked the worker to message itself
    response = client.agents.messages.create(
        agent_id=supervisor_agent.id,
        messages=[{"role": "user", "content": "Ask the worker agents what their name is, then tell me with send_message"}],
    )
    print(response.messages)
    print(response.usage)
except Exception as e:
    print(e)

# cleanup: delete every agent created above
agents = client.agents.list(tags=["worker", "supervisor"])
for agent in agents:
    client.agents.delete(agent.id)
    print(f"Deleted agent {agent.name} with ID {agent.id}")

View File

@@ -0,0 +1,34 @@
"""
This example shows how to create agents with tool rules, which restrict
what tool the agent can execute at a given step.
Note that by default, agents can execute any tool. As agents become more
powerful, they will not need as much guidance from the developer.
Last tested with letta-client version: 0.1.22
"""
from letta_client import ChildToolRule, InitToolRule, Letta, TerminalToolRule
client = Letta(base_url="http://localhost:8283")
# always search archival memory first
search_agent = client.agents.create(
name="search_agent",
memory_blocks=[],
model="anthropic/claude-3-5-sonnet-20241022",
embedding="openai/text-embedding-3-small",
tags=["worker"],
tool_rules=[
InitToolRule(tool_name="archival_memory_search"),
ChildToolRule(tool_name="archival_memory_search", children=["send_message"]),
# TerminalToolRule(tool_name="send_message", type="TerminalToolRule"),
TerminalToolRule(tool_name="send_message"),
],
)
response = client.agents.messages.create(
agent_id=search_agent.id,
messages=[{"role": "user", "content": "do something"}],
)
for message in response.messages:
print(message)

4
fern/fern.config.json Normal file
View File

@@ -0,0 +1,4 @@
{
"organization": "letta",
"version": "0.65.37"
}

53
fern/generators.yml Normal file
View File

@@ -0,0 +1,53 @@
# Fern SDK generator configuration: API auth scheme plus the Python and
# TypeScript SDK generator groups.
auth-schemes:
  # Bearer token sent in the Authorization header; optional so the SDKs can
  # also talk to unauthenticated (self-hosted) servers.
  token:
    header: Authorization
    prefix: Bearer
    type: optional<string>
api:
  auth: token
  headers:
    # optional per-request project selector, exposed as `project` in the SDKs
    X-Project:
      type: optional<string>
      name: project
  specs:
    - openapi: openapi.json
      overrides: openapi-overrides.yml
      settings:
        title-as-schema-name: false
        prefer-undiscriminated-unions-with-literals: true
groups:
  # Publishes `letta-client` to PyPI and mirrors code to letta-ai/letta-python.
  python-sdk:
    generators:
      - name: fernapi/fern-python-sdk
        version: 4.25.6
        output:
          location: pypi
          package-name: letta-client
          token: ${PYPI_TOKEN}
        github:
          repository: letta-ai/letta-python
        config:
          package_name: letta_client
          pydantic_config:
            skip_validation: true
          # internal base class is re-exported as `Letta` from client.py
          client:
            class_name: LettaBase
            filename: base_client.py
            exported_class_name: Letta
            exported_filename: client.py
  # Publishes `@letta-ai/letta-client` to npm and mirrors to letta-ai/letta-node.
  ts-sdk:
    generators:
      - name: fernapi/fern-typescript-node-sdk
        version: 0.51.7
        output:
          location: npm
          package-name: "@letta-ai/letta-client"
          token: ${NPM_TOKEN}
        github:
          repository: "letta-ai/letta-node"
        config:
          namespaceExport: Letta
          allowCustomFetcher: true
          skipResponseValidation: true
          includeApiReference: true
        smart-casing: true

BIN
fern/images/ade-mm-dark.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 323 KiB

BIN
fern/images/ade-mm.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 332 KiB

BIN
fern/images/ade_mcp.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 125 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 876 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 864 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 814 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 820 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 609 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 413 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 640 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 381 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 612 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 437 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 782 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 559 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 548 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 377 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 513 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 367 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 604 KiB

Some files were not shown because too many files have changed in this diff Show More