chore: Move core tests into apps/core (OSS Migration) [LET-4169] (#4376)

* add a bunch of tests to OSS

* symlink and auto-detect dir

* symlink the other direction

* add pull_request_target logic

* remove undertaker and add alembic validation

* symlinks don't work with GitHub Actions; add a validation workflow to ensure actions in cloud and OSS stay in lockstep

* sync these

* specify extras selectively
This commit is contained in:
Kian Jones
2025-09-04 13:46:11 -07:00
committed by GitHub
parent b8d403c962
commit 5a5da527ac
14 changed files with 1214 additions and 61 deletions

113
.github/workflows/alembic-validation.yml vendored Normal file
View File

@@ -0,0 +1,113 @@
# Validates Alembic database migrations against both SQLite and Postgres.
name: Alembic Migration Validation

on:
  pull_request:
    branches: [ main ]
  # pull_request_target runs with base-repo secrets; it is gated below on the
  # "safe to test" label so fork PRs are reviewed before anything executes.
  pull_request_target:
    branches: [ main ]
    types: [labeled]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  changed-files:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    runs-on: ubuntu-latest
    name: changed-files
    outputs:
      all_changed_files: ${{ steps.changed-files.outputs.all_changed_files }}
      any_changed: ${{ steps.changed-files.outputs.any_changed }}
    steps:
      - uses: actions/checkout@v4
        with:
          # On pull_request_target the default checkout is the BASE branch, so
          # changed-file detection would never see the PR's diff. Check out the
          # PR head explicitly (falls back to github.sha on other events).
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
          fetch-depth: 0
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        with:
          files: |
            apps/core/alembic/**
            .github/workflows/alembic-validation.yml

  test-sqlite:
    needs: [ changed-files ]
    if: ${{ needs.changed-files.outputs.any_changed == 'true' }}
    runs-on: [self-hosted, medium]
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Test the PR head, not the base branch (matters for pull_request_target).
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
      - name: Install dependencies
        shell: bash
        working-directory: apps/core
        # This is a standalone workflow (not workflow_call), so the `inputs`
        # context is always empty and the previous `inputs.install-args || ...`
        # fallback was dead code; spell the extras out directly.
        run: uv sync --no-install-project --extra sqlite --extra external-tools --extra dev --extra cloud-tool-sandbox
      - name: Test alembic migration
        working-directory: apps/core
        run: |
          uv run alembic upgrade head
          # `alembic check` is intentionally skipped on sqlite; the original
          # author noted it "might not matter for sqlite".
          # uv run alembic check
      - name: Cleanup persistent data
        if: ${{ always() }}
        working-directory: apps/core
        run: |
          echo "Cleaning up persistent data..."
          sudo rm -rf ~/.letta || true

  test-postgres:
    needs: [ changed-files ]
    if: ${{ needs.changed-files.outputs.any_changed == 'true' }}
    runs-on: [self-hosted, medium]
    timeout-minutes: 15
    services:
      postgres:
        image: pgvector/pgvector:pg17
        ports:
          - 5432:5432
        env:
          # Trust auth: no password needed inside the CI network.
          POSTGRES_HOST_AUTH_METHOD: trust
          POSTGRES_DB: postgres
          POSTGRES_USER: postgres
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Test the PR head, not the base branch (matters for pull_request_target).
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
      - name: Install dependencies
        shell: bash
        working-directory: apps/core
        # `inputs` is empty outside workflow_call; use the literal extras.
        run: uv sync --no-install-project --extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox
      - name: Test alembic migration
        working-directory: apps/core
        env:
          LETTA_PG_PORT: 5432
          LETTA_PG_USER: postgres
          LETTA_PG_PASSWORD: postgres
          LETTA_PG_DB: postgres
          LETTA_PG_HOST: localhost
        run: |
          psql -h localhost -U postgres -d postgres -c 'CREATE EXTENSION IF NOT EXISTS vector;'
          uv run alembic upgrade head
          uv run alembic check
      - name: Print docker logs if tests fail
        if: ${{ failure() || cancelled() }}
        run: |
          echo "Printing Docker Logs..."
          docker logs $(docker ps -aq --filter "ancestor=pgvector/pgvector:pg17") || true
      - name: Cleanup containers and volumes
        if: ${{ always() }}
        run: |
          echo "Cleaning up containers and volumes..."
          docker stop $(docker ps -aq --filter "ancestor=pgvector/pgvector:pg17") || true
          docker rm $(docker ps -aq --filter "ancestor=pgvector/pgvector:pg17") || true
          docker volume prune -f || true
          docker system prune -f || true

View File

@@ -0,0 +1,51 @@
# Runs the core integration-test suites through the shared reusable workflow.
name: 🐍🧪 [Core] Integration Tests

on:
  pull_request:
    branches: [ main ]
  # Fork PRs only run after a maintainer applies the "safe to test" label.
  pull_request_target:
    branches: [ main ]
    types: [labeled]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  integration-tests:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test".
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    secrets: inherit
    with:
      test-type: 'integration'
      use-redis: true
      timeout-minutes: 15
      ref: ${{ github.event.pull_request.head.sha || github.sha }}
      install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox'
      changed-files-pattern: |
        apps/core/**
        .github/workflows/reusable-test-workflow.yml
        .github/workflows/core-integration-tests.yml
      # One matrix job per test file; keep running the rest on a failure.
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "test_suite": [
              "integration_test_summarizer.py",
              "integration_test_async_tool_sandbox.py",
              "integration_test_sleeptime_agent.py",
              "integration_test_agent_tool_graph.py",
              "integration_test_composio.py",
              "integration_test_chat_completions.py",
              "integration_test_multi_agent.py",
              "integration_test_batch_api_cron_jobs.py",
              "integration_test_batch_sdk.py",
              "integration_test_builtin_tools.py",
              "integration_test_turbopuffer.py",
              "integration_test_human_in_the_loop.py"
            ]
          }
        }

63
.github/workflows/core-lint.yml vendored Normal file
View File

@@ -0,0 +1,63 @@
# Lints the core package: PR-title semantics, Pyright, and Ruff check/format.
name: 🐍🧹 [Core] Lint and Test

on:
  pull_request:
    branches: [ main ]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  changed-files:
    runs-on: ubuntu-latest
    name: changed-files
    outputs:
      all_changed_files: ${{ steps.changed-files.outputs.all_changed_files }}
      any_changed: ${{ steps.changed-files.outputs.any_changed }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        with:
          files: |
            apps/core/**
            .github/workflows/core-lint.yml

  main:
    needs: [ changed-files ]
    if: ${{ needs.changed-files.outputs.any_changed == 'true' }}
    runs-on: [self-hosted, medium]
    strategy:
      matrix:
        python-version: ["3.12"]  # Adjust Python version matrix if needed
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install dependencies
        shell: bash
        working-directory: apps/core
        # Standalone workflow (not workflow_call): the `inputs` context is
        # always empty here, so the old `inputs.install-args || ...` fallback
        # was dead code; inline the extras directly.
        run: uv sync --no-install-project --extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox
      - name: Validate PR Title
        if: github.event_name == 'pull_request'
        uses: amannn/action-semantic-pull-request@v5
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Run Pyright
        uses: jakebailey/pyright-action@v2
        # Advisory only: Pyright failures do not (yet) fail the workflow.
        # `continue-on-error` is a step-level key, not a pyright-action input.
        continue-on-error: true
        with:
          python-version: ${{ matrix.python-version }}
          level: "error"
          # NOTE(review): this step runs from the repo root while dependencies
          # are synced into apps/core — confirm Pyright resolves the intended
          # environment/configuration.
      - name: Run Ruff Check
        working-directory: apps/core
        run: uv run ruff check --config pyproject.toml --diff .
      - name: Run Ruff Format
        working-directory: apps/core
        run: uv run ruff format --config pyproject.toml --check --diff .

View File

@@ -0,0 +1,60 @@
# Runs the core unit-test suites against SQLite via the shared reusable workflow.
name: 🐍👨‍🔬 [Core] Unit Tests (SQLite)

on:
  pull_request:
    branches: [ main ]
  # Fork PRs only run after a maintainer applies the "safe to test" label.
  pull_request_target:
    branches: [ main ]
    types: [labeled]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  unit-tests:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test".
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    secrets: inherit
    with:
      test-type: 'sqlite'
      use-redis: true
      timeout-minutes: 15
      ref: ${{ github.event.pull_request.head.sha || github.sha }}
      install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google --extra sqlite'
      changed-files-pattern: |
        apps/core/**
        .github/workflows/reusable-test-workflow.yml
        .github/workflows/core-unit-sqlite-test.yml
      # One matrix job per test file; keep running the rest on a failure.
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "include": [
              {"test_suite": "test_client.py"},
              {"test_suite": "test_sdk_client.py"},
              {"test_suite": "test_server.py"},
              {"test_suite": "test_tool_schema_parsing.py"},
              {"test_suite": "test_tool_rule_solver.py"},
              {"test_suite": "test_memory.py"},
              {"test_suite": "test_utils.py"},
              {"test_suite": "test_stream_buffer_readers.py"},
              {"test_suite": "test_agent_serialization.py"},
              {"test_suite": "test_optimistic_json_parser.py"},
              {"test_suite": "test_llm_clients.py"},
              {"test_suite": "test_letta_agent_batch.py"},
              {"test_suite": "test_providers.py"},
              {"test_suite": "test_sources.py"},
              {"test_suite": "test_managers.py"},
              {"test_suite": "sdk/"},
              {"test_suite": "mcp_tests/", "use_experimental": true},
              {"test_suite": "test_timezone_formatting.py"},
              {"test_suite": "test_plugins.py"},
              {"test_suite": "test_embeddings.py"}
            ]
          }
        }

60
.github/workflows/core-unit-test.yml vendored Normal file
View File

@@ -0,0 +1,60 @@
# Runs the core unit-test suites (Postgres backend) via the shared reusable workflow.
name: 🐍👨‍🔬 [Core] Unit Tests

on:
  pull_request:
    branches: [ main ]
  # Fork PRs only run after a maintainer applies the "safe to test" label.
  pull_request_target:
    branches: [ main ]
    types: [labeled]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  unit-tests:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test".
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    secrets: inherit
    with:
      test-type: 'unit'
      use-redis: true
      timeout-minutes: 15
      ref: ${{ github.event.pull_request.head.sha || github.sha }}
      install-args: '--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google'
      changed-files-pattern: |
        apps/core/**
        .github/workflows/reusable-test-workflow.yml
        .github/workflows/core-unit-test.yml
      # One matrix job per test file; keep running the rest on a failure.
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "include": [
              {"test_suite": "test_client.py"},
              {"test_suite": "test_sdk_client.py"},
              {"test_suite": "test_server.py"},
              {"test_suite": "test_managers.py"},
              {"test_suite": "test_tool_schema_parsing.py"},
              {"test_suite": "test_tool_rule_solver.py"},
              {"test_suite": "test_memory.py"},
              {"test_suite": "test_utils.py"},
              {"test_suite": "test_stream_buffer_readers.py"},
              {"test_suite": "test_agent_serialization.py"},
              {"test_suite": "test_agent_serialization_v2.py"},
              {"test_suite": "test_optimistic_json_parser.py"},
              {"test_suite": "test_llm_clients.py"},
              {"test_suite": "test_letta_agent_batch.py"},
              {"test_suite": "test_providers.py"},
              {"test_suite": "test_sources.py"},
              {"test_suite": "sdk/"},
              {"test_suite": "mcp_tests/", "use_experimental": true},
              {"test_suite": "test_timezone_formatting.py"},
              {"test_suite": "test_plugins.py"},
              {"test_suite": "test_embeddings.py"}
            ]
          }
        }

View File

@@ -1,66 +1,33 @@
# NOTE(review): this span is a unified-diff hunk (@@ -1,66 +1,33 @@) whose +/-
# markers were stripped by the page extraction, so OLD lines (push trigger,
# inline `test:` job) and NEW lines (pull_request_target trigger, reusable
# `docker-tests:` job) are interleaved below. Do not read it as one coherent
# workflow; the post-commit file is the trigger block plus `docker-tests:`.
name: Run Docker integration tests
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
# New trigger shape added by this commit: block-style branches plus a
# label-gated pull_request_target entry.
branches:
- main
pull_request_target:
branches:
- main
types: [labeled]
concurrency:
group: docker-tests-${{ github.ref }}
cancel-in-progress: true
jobs:
# OLD inline job (removed by this commit): built dev-compose.yaml on a GitHub
# runner and ran tests/test_client.py directly against it.
test:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up python 3.11
id: setup-python
uses: actions/setup-python@v5
with:
python-version: 3.11
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
- name: Set permissions for log directory
run: |
mkdir -p /home/runner/.letta/logs
sudo chown -R $USER:$USER /home/runner/.letta/logs
chmod -R 755 /home/runner/.letta/logs
- name: Build and run docker dev server
env:
LETTA_PG_DB: letta
LETTA_PG_USER: letta
LETTA_PG_PASSWORD: letta
LETTA_PG_PORT: 8888
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
docker compose -f dev-compose.yaml up --build -d
- name: Wait for service
run: bash scripts/wait_for_service.sh http://localhost:8283 -- echo "Service is ready"
- name: Run tests with pytest
env:
LETTA_PG_DB: letta
LETTA_PG_USER: letta
LETTA_PG_PASSWORD: letta
LETTA_PG_PORT: 8888
LETTA_SERVER_PASS: test_server_token
LETTA_SERVER_URL: http://localhost:8283
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
PYTHONPATH: ${{ github.workspace }}:${{ env.PYTHONPATH }}
run: |
uv sync --extra dev --extra postgres
uv run pytest -s tests/test_client.py
- name: Print docker logs if tests fail
if: failure()
run: |
echo "Printing Docker Logs..."
docker compose -f dev-compose.yaml logs
# NEW job (added by this commit): delegates to the shared reusable workflow,
# gated on the "safe to test" label for pull_request_target.
docker-tests:
# Run on pull_request OR on pull_request_target only when labeled "safe to test"
if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
uses: ./.github/workflows/reusable-test-workflow.yml
with:
test-type: 'docker'
install-args: '--extra dev --extra postgres --extra sqlite'
timeout-minutes: 15
use-docker: true
runner: '["self-hosted", "medium"]'
ref: ${{ github.event.pull_request.head.sha || github.sha }}
changed-files-pattern: |
apps/core/**
libs/config-core-deploy/**
.github/workflows/reusable-test-workflow.yml
.github/workflows/docker-integration-tests.yaml
secrets: inherit

20
.github/workflows/fern-check.yml vendored Normal file
View File

@@ -0,0 +1,20 @@
# Validates the Fern API definition on every pull request.
name: 🌿 Fern Check

on:
  pull_request:
    branches: [ main ]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  run:
    runs-on: [self-hosted, small]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Check API is valid
        working-directory: apps
        # Assumes the `fern` CLI is preinstalled on the self-hosted runner
        # image — TODO confirm.
        run: fern check

161
.github/workflows/lint-command.yml vendored Normal file
View File

@@ -0,0 +1,161 @@
# Applies auto-formatting to a PR branch when a collaborator comments "/lint"
# on the PR, or via manual workflow_dispatch with an explicit PR number.
name: Lint Command

on:
  issue_comment:
    types: [created]
  workflow_dispatch:
    inputs:
      pr_number:
        description: 'PR number to run lint on'
        required: true

# Needs write access to push the formatting commit and to post status comments.
permissions:
  contents: write
  pull-requests: write
  issues: write

jobs:
  lint-command:
    name: Handle /lint command
    runs-on: ubuntu-latest
    # startsWith() already implies contains(); the original
    # `contains(..., '/lint') && startsWith(..., '/lint')` was redundant.
    if: |
      (github.event_name == 'workflow_dispatch' && github.event.inputs.pr_number) ||
      (github.event_name == 'issue_comment' &&
       github.event.issue.pull_request &&
       startsWith(github.event.comment.body, '/lint'))
    steps:
      - name: Add acknowledgment reaction
        if: github.event_name == 'issue_comment'
        uses: peter-evans/create-or-update-comment@v4
        with:
          comment-id: ${{ github.event.comment.id }}
          reactions: eyes
      - name: Check permissions
        if: github.event_name == 'issue_comment'
        uses: actions/github-script@v7
        with:
          script: |
            // Only admin/write collaborators may trigger a push to the PR branch.
            const { data: collaborator } = await github.rest.repos.getCollaboratorPermissionLevel({
              owner: context.repo.owner,
              repo: context.repo.repo,
              username: context.actor
            });
            if (!['admin', 'write'].includes(collaborator.permission)) {
              github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: '❌ You need write permissions to run lint commands.'
              });
              core.setFailed('Insufficient permissions');
            }
      - name: Get PR information
        id: pr
        uses: actions/github-script@v7
        with:
          script: |
            // Resolve the PR number from the comment context or the dispatch input.
            const pr_number = context.eventName === 'issue_comment'
              ? context.issue.number
              : ${{ github.event.inputs.pr_number || 'null' }};
            const { data: pr } = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: pr_number
            });
            core.setOutput('branch', pr.head.ref);
            core.setOutput('repo', pr.head.repo.full_name);
            core.setOutput('sha', pr.head.sha);
            core.setOutput('number', pr_number);
      - name: Checkout PR branch
        uses: actions/checkout@v4
        with:
          ref: ${{ steps.pr.outputs.branch }}
          token: ${{ secrets.GITHUB_TOKEN }}
          fetch-depth: 0
      - name: Set up python 3.12
        id: setup-python
        uses: actions/setup-python@v5
        with:
          python-version: 3.12
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          enable-cache: false
          activate-environment: true
      - name: Install dependencies
        run: uv sync --extra dev --extra postgres --extra external-tools
        working-directory: ./apps/core
      # Ruff-based auto-fixes are intentionally disabled for now; the
      # isort/black/autoflake step below is the active formatter set.
      # - name: Run ruff check with fixes
      #   run: uv run ruff check --fix .
      #
      # - name: Run ruff format
      #   run: uv run ruff format .
      - name: Run isort, black, autoflake
        run: uv run isort . --profile black && uv run black . && uv run autoflake --remove-all-unused-imports --remove-unused-variables --in-place --recursive --ignore-init-module-imports .
        working-directory: ./apps/core
      - name: Check for changes
        id: changes
        run: |
          if [[ -n $(git status --porcelain) ]]; then
            echo "changes=true" >> $GITHUB_OUTPUT
          else
            echo "changes=false" >> $GITHUB_OUTPUT
          fi
      - name: Commit and push changes
        if: steps.changes.outputs.changes == 'true'
        run: |
          git config --global user.name "github-actions[bot]"
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
          git add .
          git commit -m "style: lint / fmt
          Triggered by /lint command from @${{ github.actor }}"
          git push
      - name: Comment on success
        if: steps.changes.outputs.changes == 'true'
        uses: peter-evans/create-or-update-comment@v4
        with:
          issue-number: ${{ steps.pr.outputs.number }}
          # Message corrected: the active formatters are isort/black/autoflake,
          # not Ruff (the Ruff steps above are commented out).
          body: |
            ✅ **Lint fixes applied successfully!**
            isort, black, and autoflake have automatically fixed formatting and import issues.
            Changes have been committed to the PR branch.
      - name: Comment on no changes
        if: steps.changes.outputs.changes == 'false'
        uses: peter-evans/create-or-update-comment@v4
        with:
          issue-number: ${{ steps.pr.outputs.number }}
          body: |
            ✅ **No lint issues found!**
            The code is already properly formatted and passes all linting checks.
      - name: Comment on failure
        if: failure()
        uses: peter-evans/create-or-update-comment@v4
        with:
          issue-number: ${{ steps.pr.outputs.number }}
          body: |
            ❌ **Lint command failed!**
            There was an error while running the lint fixes. Please check the [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.

View File

@@ -0,0 +1,471 @@
name: Reusable Test Workflow
on:
workflow_call:
inputs:
test-type:
description: 'Type of tests to run (unit, integration, docker, send-message, sqlite)'
required: true
type: string
core-directory:
description: 'Working directory for commands. Auto-detects between apps/core (cloud) and . (OSS). Can be overridden.'
required: false
type: string
default: 'auto'
install-args:
description: 'uv sync arguments'
required: true
type: string
test-command:
description: 'Command to run tests'
required: false
type: string
default: 'uv run --frozen pytest -svv'
test-path-prefix:
description: 'Prefix for test path (e.g., tests/)'
required: false
type: string
default: 'tests/'
timeout-minutes:
description: 'Timeout in minutes'
required: false
type: number
default: 15
runner:
description: 'Runner to use'
required: false
type: string
default: '["self-hosted", "small"]'
matrix-strategy:
description: 'JSON string for matrix strategy'
required: false
type: string
default: '{}'
changed-files-pattern:
description: 'Pattern for changed files detection'
required: false
type: string
default: |
apps/core/**
.github/workflows/reusable-test-workflow.yml
skip-fern-generation:
description: 'Skip Fern SDK generation'
required: false
type: boolean
default: false
use-docker:
description: 'Use Docker for tests'
required: false
type: boolean
default: false
ref:
description: 'Git ref to wait for checks on'
required: false
type: string
default: ${{ github.sha }}
use-redis:
description: 'Use Redis for tests'
required: false
type: boolean
default: false
jobs:
changed-files:
runs-on: ${{ fromJSON(inputs.runner) }}
name: changed-files
outputs:
all_changed_files: ${{ steps.changed-files.outputs.all_changed_files }}
any_changed: ${{ steps.changed-files.outputs.any_changed }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v46.0.4
with:
files: ${{ inputs.changed-files-pattern }}
cache-check:
needs: [changed-files]
runs-on: ${{ fromJSON(inputs.runner) }}
name: Check cache key
outputs:
cache_key: ${{ steps.cache-key.outputs.key }}
cache_hit: ${{ steps.cache.outputs.cache-hit }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Generate cache key
if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi-overrides.yml'))
id: cache-key
run: |
echo "key=sdk-${{ github.ref_name }}-${{ hashFiles('apps/fern/*', 'apps/core/pyproject.toml') }}" >> $GITHUB_OUTPUT
- name: Restore SDK cache
# skip if "skip-fern-generation" is true or if the upstream workflow would've generated an sdk preview (changes to openapi files)
if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi-overrides.yml'))
id: cache
uses: actions/cache/restore@v4
with:
path: |
apps/fern/.preview/fern-python-sdk/
key: ${{ steps.cache-key.outputs.key }}
fail-on-cache-miss: false
block-until-sdk-preview-finishes:
needs: [changed-files, cache-check]
if: |
needs.cache-check.outputs.cache_hit != 'true'
timeout-minutes: ${{ inputs.timeout-minutes }}
runs-on: ${{ fromJSON(inputs.runner) }}
name: block-until-sdk-preview-finishes
steps:
- name: Debug ref information
run: |
echo "Input ref: ${{ inputs.ref }}"
echo "GitHub SHA: ${{ github.sha }}"
echo "GitHub ref: ${{ github.ref }}"
echo "PR head SHA: ${{ github.event.pull_request.head.sha }}"
echo "Event name: ${{ github.event_name }}"
- name: Wait for Preview SDK workflow
if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi-overrides.yml'))
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "Waiting for 'preview-python-sdk' check to complete on ref: ${{ inputs.ref }}"
# Wait for the check to complete with timeout
timeout_seconds=1800
interval_seconds=60
elapsed=0
while [ $elapsed -lt $timeout_seconds ]; do
echo "Checking status... (elapsed: ${elapsed}s)"
# Get check runs using pr checks syntax with branch name or PR number
if [ "${{ github.event_name }}" = "pull_request" ]; then
pr_identifier="${{ github.event.pull_request.number }}"
else
pr_identifier="${{ github.ref_name }}"
fi
check_info=$(gh pr checks "$pr_identifier" -R ${{ github.repository }} --json name,state,startedAt \
| jq -r '.[] | select(.name == "preview-python-sdk") | [.startedAt, .state] | @tsv' | sort -r | head -1 | cut -f2)
if [ -n "$check_info" ]; then
echo "Check state: $check_info"
if [ "$check_info" = "SUCCESS" ] || [ "$check_info" = "SKIPPED" ]; then
echo "Check completed with state: $check_info"
exit 0
elif [ "$check_info" = "FAILURE" ] || [ "$check_info" = "CANCELLED" ]; then
echo "❌ Preview Python SDK build failed with state: $check_info"
echo "🚫 Blocking dependent test jobs to prevent extraneous failures"
echo "📋 To fix: Check the 'preview-python-sdk' job logs for build errors"
exit 1
fi
else
echo "Check 'preview-python-sdk' not found yet"
fi
sleep $interval_seconds
elapsed=$((elapsed + interval_seconds))
done
echo "Timeout waiting for check to complete"
exit 1
test-run:
needs: [changed-files, block-until-sdk-preview-finishes]
if: |
always() &&
needs.changed-files.outputs.any_changed == 'true' &&
(needs.block-until-sdk-preview-finishes.result == 'success' ||
needs.block-until-sdk-preview-finishes.result == 'skipped')
runs-on: ${{ fromJSON(inputs.runner) }}
timeout-minutes: ${{ inputs.timeout-minutes }}
strategy: ${{ fromJSON(inputs.matrix-strategy) }}
services:
postgres:
image: pgvector/pgvector:pg17
ports:
# avoids conflict with docker postgres
- ${{ inputs.use-docker && '9999:5432' || '5432:5432' }}
env:
POSTGRES_HOST_AUTH_METHOD: trust
POSTGRES_DB: postgres
POSTGRES_USER: postgres
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis:
image: ${{ inputs.use-redis && 'redis:8-alpine' || '' }}
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 6379:6379
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
- name: Detect core directory
id: detect-core-dir
run: |
if [ "${{ inputs.core-directory }}" = "auto" ]; then
if [ -d "apps/core" ]; then
echo "dir=apps/core" >> $GITHUB_OUTPUT
echo "detected=cloud" >> $GITHUB_OUTPUT
else
echo "dir=." >> $GITHUB_OUTPUT
echo "detected=oss" >> $GITHUB_OUTPUT
fi
else
echo "dir=${{ inputs.core-directory }}" >> $GITHUB_OUTPUT
echo "detected=manual" >> $GITHUB_OUTPUT
fi
echo "Using core directory: $(cat $GITHUB_OUTPUT | grep '^dir=' | cut -d'=' -f2)"
- name: Generate cache key
if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi-overrides.yml'))
id: cache-key
run: |
echo "key=sdk-${{ github.ref_name }}-${{ hashFiles('apps/fern/*', 'apps/core/pyproject.toml') }}" >> $GITHUB_OUTPUT
- name: Restore SDK cache
# skip if "skip-fern-generation" is true or if the upstream workflow would've generated an sdk preview (changes to openapi files)
if: inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi-overrides.yml'))
id: restore-sdk-cache
uses: actions/cache/restore@v4
with:
path: |
apps/fern/.preview/fern-python-sdk/
key: ${{ steps.cache-key.outputs.key }}
fail-on-cache-miss: false
- name: Check SDK cache availability
if: (inputs.skip-fern-generation != true || (!contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi.json') && !contains(needs.changed-files.outputs.all_changed_files, 'apps/fern/openapi-overrides.yml'))) && steps.restore-sdk-cache.outputs.cache-hit != 'true'
run: |
echo "❌ Preview Python SDK cache expired or missing!"
echo "📦 Cache key: ${{ steps.cache-key.outputs.key }}"
echo "🔄 To fix: Re-run the 'preview-python-sdk' workflow job to regenerate the SDK"
echo "💡 This can happen when:"
echo " - The cache entry has expired"
echo " - Dependencies in apps/fern/* or apps/core/pyproject.toml have changed"
echo " - The preview-python-sdk job hasn't run successfully for this branch/commit"
exit 1
- name: Install dependencies with retry
shell: bash
working-directory: ${{ steps.detect-core-dir.outputs.dir }}
run: |
uv sync --no-install-project ${{ inputs.install-args }}
- name: Install custom SDK
if: inputs.skip-fern-generation != true
working-directory: ${{ steps.detect-core-dir.outputs.dir }}
run: |
echo "Fixing Fern SDK pyproject.toml for uv compatibility..."
SDK_PYPROJECT="../fern/.preview/fern-python-sdk/pyproject.toml"
VERSION=$(grep -A 10 '^\[tool\.poetry\]' "$SDK_PYPROJECT" | grep '^version' | head -1 | cut -d'"' -f2)
head -n 2 < ../fern/.preview/fern-python-sdk/pyproject.toml > ../fern/.preview/fern-python-sdk/pyproject.toml.tmp
echo "version = \"$VERSION\"" >> ../fern/.preview/fern-python-sdk/pyproject.toml.tmp
tail -n +3 ../fern/.preview/fern-python-sdk/pyproject.toml >> ../fern/.preview/fern-python-sdk/pyproject.toml.tmp
mv ../fern/.preview/fern-python-sdk/pyproject.toml.tmp ../fern/.preview/fern-python-sdk/pyproject.toml
uv pip install -e ../fern/.preview/fern-python-sdk/.
- name: Migrate database
if: inputs.use-docker != true && inputs.test-type != 'sqlite'
working-directory: ${{ steps.detect-core-dir.outputs.dir }}
env:
LETTA_PG_PORT: 5432
LETTA_PG_USER: postgres
LETTA_PG_PASSWORD: postgres
LETTA_PG_DB: postgres
LETTA_PG_HOST: localhost
run: |
psql -h localhost -U postgres -d postgres -c 'CREATE EXTENSION vector'
uv run alembic upgrade head
- name: Inject env vars into environment
working-directory: ${{ steps.detect-core-dir.outputs.dir }}
run: |
# Get secrets and mask them before adding to environment
while IFS= read -r line || [[ -n "$line" ]]; do
if [[ -n "$line" ]]; then
value=$(echo "$line" | cut -d= -f2-)
echo "::add-mask::$value"
echo "$line" >> $GITHUB_ENV
fi
done < <(letta_secrets_helper --env dev --service ci)
- name: Docker setup for Docker tests
if: inputs.use-docker
run: |
mkdir -p /home/ci-runner/.letta/logs
sudo chown -R $USER:$USER /home/ci-runner/.letta/logs
chmod -R 755 /home/ci-runner/.letta/logs
- name: Build and run docker dev server
if: inputs.use-docker
env:
LETTA_PG_DB: letta
LETTA_PG_USER: letta
LETTA_PG_PASSWORD: letta
LETTA_PG_PORT: 5432
OPENAI_API_KEY: ${{ env.OPENAI_API_KEY }}
run: |
cd libs/config-core-deploy
docker compose -f compose.yaml up --build -d
- name: Wait for Docker service
if: inputs.use-docker
working-directory: ${{ steps.detect-core-dir.outputs.dir }}
run: |
bash scripts/wait_for_service.sh localhost:8083 -- echo "Service is ready"
- name: Run tests
working-directory: ${{ steps.detect-core-dir.outputs.dir }}
env:
# Database configuration (shared, but values depend on Docker usage)
LETTA_PG_PORT: 5432
LETTA_PG_USER: ${{ inputs.use-docker && 'letta' || 'postgres' }}
LETTA_PG_PASSWORD: ${{ inputs.use-docker && 'letta' || 'postgres' }}
LETTA_PG_DB: ${{ inputs.use-docker && 'letta' || 'postgres' }}
LETTA_PG_HOST: localhost
# Server configuration (conditional)
LETTA_SERVER_PASS: test_server_token
# LLM Provider API Keys (shared across all test types)
OPENAI_API_KEY: ${{ env.OPENAI_API_KEY }}
ANTHROPIC_API_KEY: ${{ env.ANTHROPIC_API_KEY }}
GEMINI_API_KEY: ${{ env.GEMINI_API_KEY }}
GROQ_API_KEY: ${{ env.GROQ_API_KEY }}
AZURE_API_KEY: ${{ env.AZURE_API_KEY }}
AZURE_BASE_URL: ${{ secrets.AZURE_BASE_URL }}
DEEPSEEK_API_KEY: ${{ env.DEEPSEEK_API_KEY }}
LETTA_MISTRAL_API_KEY: ${{ secrets.LETTA_MISTRAL_API_KEY }}
# External service API Keys (shared across all test types)
COMPOSIO_API_KEY: ${{ env.COMPOSIO_API_KEY }}
E2B_API_KEY: ${{ env.E2B_API_KEY }}
E2B_SANDBOX_TEMPLATE_ID: ${{ env.E2B_SANDBOX_TEMPLATE_ID }}
TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }}
PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
PINECONE_INDEX_HOST: ${{ secrets.PINECONE_INDEX_HOST }}
PINECONE_NAMESPACE: ${{ secrets.PINECONE_NAMESPACE }}
# Turbopuffer flags
LETTA_USE_TPUF: true
LETTA_TPUF_API_KEY: ${{ env.LETTA_TPUF_API_KEY }}
# Google Cloud (shared across all test types)
GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }}
GOOGLE_CLOUD_LOCATION: ${{ secrets.GOOGLE_CLOUD_LOCATION }}
# Feature flags (shared across all test types)
LETTA_ENABLE_BATCH_JOB_POLLING: true
LETTA_GEMINI_FORCE_MINIMUM_THINKING_BUDGET: true
LETTA_GEMINI_MAX_RETRIES: 10
# Pinecone flags
LETTA_PINECONE_API_KEY: ${{ secrets.LETTA_PINECONE_API_KEY }}
LETTA_ENABLE_PINECONE: ${{ secrets.LETTA_ENABLE_PINECONE }}
EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
# Docker-specific environment variables
PYTHONPATH: ${{ inputs.use-docker && format('{0}:{1}', github.workspace, env.PYTHONPATH) || '' }}
LETTA_REDIS_HOST: localhost
# Dispatcher script: configures per-provider endpoints, then selects the
# pytest invocation based on inputs.test-type. NOTE(review): ${{ ... }}
# expressions are expanded by the Actions runner BEFORE bash executes; the
# values here come from workflow inputs/matrix (workflow-controlled), but
# never interpolate untrusted PR-supplied data into this script, since this
# reusable workflow is also reachable from pull_request_target callers.
run: |
  set -o xtrace
  # Set LETTA_SERVER_URL only for Docker tests
  if [[ "${{ inputs.use-docker }}" == "true" ]]; then
    export LETTA_SERVER_URL="http://localhost:8083"
  fi
  # Set LLM_CONFIG_FILE only for send-message tests
  if [[ "${{ inputs.test-type }}" == "send-message" ]]; then
    export LLM_CONFIG_FILE="${{ matrix.config_file }}"
  fi
  # Set Ollama base URL only for Ollama tests
  if [[ "${{ inputs.test-type }}" == "integration" && "${{ inputs.runner }}" == *"ollama"* ]]; then
    export LLM_CONFIG_FILE="ollama.json"
    export OLLAMA_BASE_URL="http://localhost:11434"
  fi
  # Set LMStudio base URL only for LMStudio tests
  if [[ "${{ inputs.test-type }}" == "integration" && "${{ inputs.runner }}" == *"lmstudio"* ]]; then
    export LLM_CONFIG_FILE="lmstudio.json"
    export LMSTUDIO_BASE_URL="http://localhost:1234"
  fi
  # Set VLLM base URL only for VLLM tests
  if [[ "${{ inputs.test-type }}" == "integration" && "${{ inputs.runner }}" == *"vllm"* ]]; then
    export LLM_CONFIG_FILE="vllm.json"
    export VLLM_BASE_URL="http://localhost:8000"
  fi
  # Annotate pytest failures inline on the PR diff
  uv pip install pytest-github-actions-annotate-failures
  # Handle different matrix variable names and test commands based on test type
  if [[ "${{ inputs.test-type }}" == "integration" ]]; then
    # Integration runs install the published letta package; the `pip show`
    # lines are diagnostics so the resolved versions appear in the log.
    uv pip install letta
    uv pip show letta
    uv pip show letta-client
    uv run --frozen pytest -svv ${{ inputs.test-path-prefix }}${{ matrix.test_suite }}
  elif [[ "${{ inputs.test-type }}" == "unit" ]]; then
    uv pip show letta-client
    uv run --frozen pytest -svv ${{ inputs.test-path-prefix }}${{ matrix.test_suite }}
  elif [[ "${{ inputs.test-type }}" == "send-message" ]]; then
    # --maxfail=1 stops the matrix job on the first provider failure
    uv run --frozen pytest -s -vv tests/integration_test_send_message.py --maxfail=1 --durations=10
  elif [[ "${{ inputs.test-type }}" == "docker" ]]; then
    uv run --frozen pytest -s tests/test_client.py
  elif [[ "${{ inputs.test-type }}" == "sqlite" ]]; then
    # force sqlite
    # Unsetting the PG_* vars makes the server fall back to its sqlite
    # backend; migrations are applied first so the schema is current.
    unset LETTA_PG_USER
    unset LETTA_PG_PASSWORD
    unset LETTA_PG_DB
    unset LETTA_PG_HOST
    uv pip show letta-client
    uv run alembic upgrade head
    uv run --frozen pytest -svv ${{ inputs.test-path-prefix }}${{ matrix.test_suite }}
  else
    # Escape hatch: caller supplies the full command for bespoke test types.
    ${{ inputs.test-command }}
  fi
# Clean the on-disk sqlite state so subsequent runs on this self-hosted
# runner start fresh; `|| true` keeps cleanup best-effort.
- name: Remove sqlite db
  if: ${{ always() && inputs.test-type == 'sqlite' }}
  run: sudo rm -rf ~/.letta || true
# Surface server logs in the job output when Docker-backed tests fail or
# are cancelled, so failures are debuggable without re-running.
- name: Print docker logs if tests fail
  if: ${{ (failure() || cancelled()) && inputs.use-docker }}
  working-directory: libs/config-core-deploy
  run: |
    echo "Printing Docker Logs..."
    docker compose -f compose.yaml logs
# Always tear down the compose stack (and its volumes/persist dir) on
# self-hosted runners so state cannot leak between runs.
- name: Stop docker
  if: ${{ always() && inputs.use-docker }}
  working-directory: libs/config-core-deploy
  run: |
    docker compose -f compose.yaml down --volumes
    sudo rm -rf .persist

View File

@@ -1,7 +1,7 @@
name: Send Message SDK Tests
on:
pull_request_target:
# branches: [main] # TODO: uncomment before merge
branches: [main]
types: [labeled]
paths:
- 'letta/**'

View File

@@ -0,0 +1,48 @@
name: 🐍🧪 [Core] Send Message SDK Tests
# Runs the send-message integration matrix against multiple LLM providers.
# Fork PRs arrive via pull_request_target and only run once a maintainer
# applies the "safe to test" label (see the job-level `if`).
on:
  pull_request:
    branches:
      - main
  pull_request_target:
    branches:
      - main
    types: [labeled]
# One active run per PR; in-progress runs are cancelled on new pushes
# everywhere except main.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
jobs:
  send-message-tests:
    # Run on pull_request OR on pull_request_target only when labeled "safe to test"
    if: github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
    uses: ./.github/workflows/reusable-test-workflow.yml
    with:
      test-type: 'send-message'
      # Skip the run entirely unless relevant paths changed.
      changed-files-pattern: |
        apps/core/**
        .github/workflows/reusable-test-workflow.yml
        .github/workflows/send-message-integration-tests.yml
      install-args: '--extra dev --extra postgres --extra external-tools --extra cloud-tool-sandbox --extra google --extra redis'
      timeout-minutes: 15
      runner: '["self-hosted", "medium"]'
      # For pull_request_target the default checkout is the base branch, so
      # explicitly test the PR head SHA (label gate above guards this).
      ref: ${{ github.event.pull_request.head.sha || github.sha }}
      use-redis: true
      # TODO: "azure-gpt-4o-mini.json" add back later, getting content violation
      matrix-strategy: |
        {
          "fail-fast": false,
          "matrix": {
            "config_file": [
              "openai-gpt-4o-mini.json",
              "claude-4-sonnet-extended.json",
              "claude-3-5-sonnet.json",
              "claude-3-7-sonnet-extended.json",
              "gemini-1.5-pro.json",
              "gemini-2.5-pro.json",
              "gemini-2.5-flash.json"
            ]
          }
        }
    secrets: inherit

47
.github/workflows/test-lmstudio.yml vendored Normal file
View File

@@ -0,0 +1,47 @@
name: Self-Hosted Provider Integration - LMStudio
on:
workflow_dispatch:
# inputs:
# ref:
# description: 'Git ref to test'
# required: false
# type: string
# default: ${{ github.sha || github.ref || github.event.pull_request.head.sha }}
pull_request:
paths:
- 'apps/core/**'
- '.github/workflows/test-lmstudio.yml'
- '.github/workflows/reusable-test-workflow.yml'
pull_request_target:
types: [labeled]
paths:
- 'apps/core/**'
- '.github/workflows/test-lmstudio.yml'
- '.github/workflows/reusable-test-workflow.yml'
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
jobs:
test-lmstudio:
# Run on pull_request OR on pull_request_target only when labeled "safe to test"
if: github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
uses: ./.github/workflows/reusable-test-workflow.yml
with:
test-type: "integration"
install-args: "-E dev -E postgres -E external-tools -E tests -E cloud-tool-sandbox -E google"
test-command: "uv run pytest -svv tests/"
timeout-minutes: 60
runner: '["self-hosted", "gpu", "lmstudio"]'
matrix-strategy: |
{
"fail-fast": false,
"matrix": {
"test_suite": [
"integration_test_send_message.py"
]
}
}
secrets: inherit

48
.github/workflows/test-ollama.yml vendored Normal file
View File

@@ -0,0 +1,48 @@
name: Self-Hosted Provider Integration - Ollama
on:
workflow_dispatch:
# inputs:
# ref:
# description: 'Git ref to test'
# required: false
# type: string
# default: ${{ github.sha || github.ref || github.event.pull_request.head.sha }}
pull_request:
paths:
- 'apps/core/**'
- '.github/workflows/test-ollama.yml'
- '.github/workflows/reusable-test-workflow.yml'
pull_request_target:
types: [labeled]
paths:
- 'apps/core/**'
- '.github/workflows/test-ollama.yml'
- '.github/workflows/reusable-test-workflow.yml'
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
jobs:
test-ollama:
# Run on pull_request OR on pull_request_target only when labeled "safe to test"
if: github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
uses: ./.github/workflows/reusable-test-workflow.yml
with:
test-type: "integration"
install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google"
test-command: "uv run --frozen pytest -svv tests/"
timeout-minutes: 60
runner: '["self-hosted", "gpu", "ollama"]'
matrix-strategy: |
{
"fail-fast": false,
"matrix": {
"test_suite": [
"test_providers.py::test_ollama",
"integration_test_send_message.py"
]
}
}
secrets: inherit

44
.github/workflows/test-vllm.yml vendored Normal file
View File

@@ -0,0 +1,44 @@
name: Self-Hosted Provider Integration - vLLM
on:
workflow_dispatch:
# inputs:
# ref:
# description: 'Git ref to test'
# required: false
# type: string
# default: ${{ github.sha || github.ref || github.event.pull_request.head.sha }}
pull_request:
paths:
- 'apps/core/**'
- '.github/workflows/test-vllm.yml'
- '.github/workflows/reusable-test-workflow.yml'
pull_request_target:
types: [labeled]
paths:
- 'apps/core/**'
- '.github/workflows/test-vllm.yml'
- '.github/workflows/reusable-test-workflow.yml'
jobs:
test-vllm:
# Run on pull_request OR on pull_request_target only when labeled "safe to test"
if: github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe to test'))
uses: ./.github/workflows/reusable-test-workflow.yml
with:
test-type: "integration"
install-args: "--extra postgres --extra external-tools --extra dev --extra cloud-tool-sandbox --extra google"
test-command: "uv run --frozen pytest -svv tests/"
timeout-minutes: 60
runner: '["self-hosted", "gpu", "vllm"]'
matrix-strategy: |
{
"fail-fast": false,
"matrix": {
"test_suite": [
"test_providers.py::test_vllm",
"integration_test_send_message.py"
]
}
}
secrets: inherit