diff --git a/.github/scripts/model-sweep/conftest.py b/.github/scripts/model-sweep/conftest.py index 146bf058..edc0ae34 100644 --- a/.github/scripts/model-sweep/conftest.py +++ b/.github/scripts/model-sweep/conftest.py @@ -16,7 +16,6 @@ from letta.schemas.agent import AgentState from letta.schemas.llm_config import LLMConfig from letta.services.organization_manager import OrganizationManager from letta.services.user_manager import UserManager -from letta.settings import tool_settings def pytest_configure(config): diff --git a/.github/scripts/model-sweep/generate_model_sweep_markdown.py b/.github/scripts/model-sweep/generate_model_sweep_markdown.py index 38552a8c..c82e051d 100644 --- a/.github/scripts/model-sweep/generate_model_sweep_markdown.py +++ b/.github/scripts/model-sweep/generate_model_sweep_markdown.py @@ -31,7 +31,7 @@ def get_support_status(passed_tests, feature_tests): # Filter out error tests when checking for support non_error_tests = [test for test in feature_tests if not test.endswith("_error")] - error_tests = [test for test in feature_tests if test.endswith("_error")] + [test for test in feature_tests if test.endswith("_error")] # Check which non-error tests passed passed_non_error_tests = [test for test in non_error_tests if test in passed_tests] @@ -137,7 +137,7 @@ def get_github_repo_info(): else: return None return repo_path - except: + except Exception: pass # Default fallback @@ -335,7 +335,7 @@ def process_model_sweep_report(input_file, output_file, config_file=None, debug= # Format timestamp if it's a full ISO string if "T" in str(last_scanned): last_scanned = str(last_scanned).split("T")[0] # Just the date part - except: + except Exception: last_scanned = "Unknown" # Calculate support score for ranking diff --git a/.github/scripts/model-sweep/model_sweep.py b/.github/scripts/model-sweep/model_sweep.py index 97a19306..086ea0a4 100644 --- a/.github/scripts/model-sweep/model_sweep.py +++ b/.github/scripts/model-sweep/model_sweep.py @@ -1,16 
+1,12 @@ import base64 import json import os -import socket -import threading import time import uuid from typing import Any, Dict, List import httpx import pytest -import requests -from dotenv import load_dotenv from letta_client import Letta, MessageCreate, Run from letta_client.core.api_error import ApiError from letta_client.types import ( @@ -694,7 +690,7 @@ def test_token_streaming_agent_loop_error( stream_tokens=True, ) list(response) - except: + except Exception: pass # only some models throw an error TODO: make this consistent messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id) diff --git a/.github/workflows/reusable-test-workflow.yml b/.github/workflows/reusable-test-workflow.yml index 88691161..955cd72d 100644 --- a/.github/workflows/reusable-test-workflow.yml +++ b/.github/workflows/reusable-test-workflow.yml @@ -381,6 +381,10 @@ jobs: GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }} GOOGLE_CLOUD_LOCATION: ${{ secrets.GOOGLE_CLOUD_LOCATION }} + # Real object store (required for git-backed memory integration test) + # Use DEV bucket/prefix variable to avoid prod resources. + LETTA_OBJECT_STORE_URI: ${{ vars.LETTA_OBJECT_STORE_URI_DEV }} + # Feature flags (shared across all test types) LETTA_ENABLE_BATCH_JOB_POLLING: true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dade61ca..90fd016c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,3 +23,10 @@ repos: - id: ruff-check args: [ --fix ] - id: ruff-format + + - repo: local + hooks: + - id: ty + name: ty check + entry: uv run ty check . 
+ language: python diff --git a/Dockerfile b/Dockerfile index f15af803..2fee7e57 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start with pgvector base for builder -FROM ankane/pgvector:v0.5.1 AS builder +FROM pgvector/pgvector:0.8.1-pg15 AS builder # comment to trigger ci # Install Python and required packages RUN apt-get update && apt-get install -y \ @@ -39,7 +39,7 @@ COPY . . RUN uv sync --frozen --no-dev --all-extras --python 3.11 # Runtime stage -FROM ankane/pgvector:v0.5.1 AS runtime +FROM pgvector/pgvector:0.8.1-pg15 AS runtime # Overridable Node.js version with --build-arg NODE_VERSION ARG NODE_VERSION=22 diff --git a/alembic/versions/038e68cdf0df_add_cascades_to_blocks_agents_fks_set_.py b/alembic/versions/038e68cdf0df_add_cascades_to_blocks_agents_fks_set_.py index 83406ef5..81f0e7d1 100644 --- a/alembic/versions/038e68cdf0df_add_cascades_to_blocks_agents_fks_set_.py +++ b/alembic/versions/038e68cdf0df_add_cascades_to_blocks_agents_fks_set_.py @@ -8,8 +8,6 @@ Create Date: 2025-10-07 13:01:17.872405 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op # revision identifiers, used by Alembic. diff --git a/alembic/versions/18ff61fbc034_add_agent_id_index_to_mapping_tables.py b/alembic/versions/18ff61fbc034_add_agent_id_index_to_mapping_tables.py index 825ead37..29e1c65d 100644 --- a/alembic/versions/18ff61fbc034_add_agent_id_index_to_mapping_tables.py +++ b/alembic/versions/18ff61fbc034_add_agent_id_index_to_mapping_tables.py @@ -8,8 +8,6 @@ Create Date: 2025-09-10 19:16:39.118760 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op # revision identifiers, used by Alembic. 
diff --git a/alembic/versions/39577145c45d_add_project_constraint_on_tools.py b/alembic/versions/39577145c45d_add_project_constraint_on_tools.py index 98a6b6be..b9d4e866 100644 --- a/alembic/versions/39577145c45d_add_project_constraint_on_tools.py +++ b/alembic/versions/39577145c45d_add_project_constraint_on_tools.py @@ -8,8 +8,6 @@ Create Date: 2025-12-17 15:46:06.184858 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op # revision identifiers, used by Alembic. diff --git a/alembic/versions/3bc3c031fbe4_create_new_runs_table_and_remove_legacy_.py b/alembic/versions/3bc3c031fbe4_create_new_runs_table_and_remove_legacy_.py index 5339801b..3a194649 100644 --- a/alembic/versions/3bc3c031fbe4_create_new_runs_table_and_remove_legacy_.py +++ b/alembic/versions/3bc3c031fbe4_create_new_runs_table_and_remove_legacy_.py @@ -8,8 +8,6 @@ Create Date: 2025-10-03 12:10:51.065067 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op # revision identifiers, used by Alembic. diff --git a/alembic/versions/3e54e2fa2f7e_add_usage_columns_to_steps.py b/alembic/versions/3e54e2fa2f7e_add_usage_columns_to_steps.py new file mode 100644 index 00000000..997d0d80 --- /dev/null +++ b/alembic/versions/3e54e2fa2f7e_add_usage_columns_to_steps.py @@ -0,0 +1,33 @@ +"""add_usage_columns_to_steps + +Revision ID: 3e54e2fa2f7e +Revises: a1b2c3d4e5f8 +Create Date: 2026-02-03 16:35:51.327031 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = "3e54e2fa2f7e" +down_revision: Union[str, None] = "a1b2c3d4e5f8" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.add_column("steps", sa.Column("model_handle", sa.String(), nullable=True)) + op.add_column("steps", sa.Column("cached_input_tokens", sa.Integer(), nullable=True)) + op.add_column("steps", sa.Column("cache_write_tokens", sa.Integer(), nullable=True)) + op.add_column("steps", sa.Column("reasoning_tokens", sa.Integer(), nullable=True)) + + +def downgrade() -> None: + op.drop_column("steps", "reasoning_tokens") + op.drop_column("steps", "cache_write_tokens") + op.drop_column("steps", "cached_input_tokens") + op.drop_column("steps", "model_handle") diff --git a/alembic/versions/54dec07619c4_divide_passage_table_into_.py b/alembic/versions/54dec07619c4_divide_passage_table_into_.py index e58a490a..e8c85fef 100644 --- a/alembic/versions/54dec07619c4_divide_passage_table_into_.py +++ b/alembic/versions/54dec07619c4_divide_passage_table_into_.py @@ -9,6 +9,7 @@ Create Date: 2024-12-14 17:23:08.772554 from typing import Sequence, Union import sqlalchemy as sa +from pgvector.sqlalchemy import Vector from sqlalchemy.dialects import postgresql from alembic import op diff --git a/alembic/versions/57bcea83af3f_add_various_indexes.py b/alembic/versions/57bcea83af3f_add_various_indexes.py index a2e71cd4..14a2f091 100644 --- a/alembic/versions/57bcea83af3f_add_various_indexes.py +++ b/alembic/versions/57bcea83af3f_add_various_indexes.py @@ -8,8 +8,6 @@ Create Date: 2025-09-19 10:58:19.658106 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op # revision identifiers, used by Alembic. 
diff --git a/alembic/versions/89b595051e48_replace_composite_runs_index.py b/alembic/versions/89b595051e48_replace_composite_runs_index.py index 8988a376..ea5494de 100644 --- a/alembic/versions/89b595051e48_replace_composite_runs_index.py +++ b/alembic/versions/89b595051e48_replace_composite_runs_index.py @@ -8,8 +8,6 @@ Create Date: 2025-10-06 13:17:09.918439 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op # revision identifiers, used by Alembic. diff --git a/alembic/versions/a1b2c3d4e5f6_add_index_to_step_metrics_run_id.py b/alembic/versions/a1b2c3d4e5f6_add_index_to_step_metrics_run_id.py index d2a43356..6970a70a 100644 --- a/alembic/versions/a1b2c3d4e5f6_add_index_to_step_metrics_run_id.py +++ b/alembic/versions/a1b2c3d4e5f6_add_index_to_step_metrics_run_id.py @@ -8,8 +8,6 @@ Create Date: 2025-11-11 19:16:00.000000 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op from letta.settings import settings diff --git a/alembic/versions/af842aa6f743_add_tool_indexes_for_organization_id.py b/alembic/versions/af842aa6f743_add_tool_indexes_for_organization_id.py index 4295ae72..967532d5 100644 --- a/alembic/versions/af842aa6f743_add_tool_indexes_for_organization_id.py +++ b/alembic/versions/af842aa6f743_add_tool_indexes_for_organization_id.py @@ -8,8 +8,6 @@ Create Date: 2025-12-07 15:30:43.407495 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op # revision identifiers, used by Alembic. 
diff --git a/alembic/versions/b1c2d3e4f5a6_drop_unused_and_redundant_indexes.py b/alembic/versions/b1c2d3e4f5a6_drop_unused_and_redundant_indexes.py index e1c72dd1..6909f61f 100644 --- a/alembic/versions/b1c2d3e4f5a6_drop_unused_and_redundant_indexes.py +++ b/alembic/versions/b1c2d3e4f5a6_drop_unused_and_redundant_indexes.py @@ -8,8 +8,6 @@ Create Date: 2025-11-11 21:16:00.000000 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op from letta.settings import settings diff --git a/alembic/versions/b2c3d4e5f6a8_add_llm_config_to_conversations.py b/alembic/versions/b2c3d4e5f6a8_add_llm_config_to_conversations.py new file mode 100644 index 00000000..b8e94dc5 --- /dev/null +++ b/alembic/versions/b2c3d4e5f6a8_add_llm_config_to_conversations.py @@ -0,0 +1,29 @@ +"""Add model and model_settings columns to conversations table for model overrides + +Revision ID: b2c3d4e5f6a8 +Revises: 3e54e2fa2f7e +Create Date: 2026-02-23 02:50:00.000000 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = "b2c3d4e5f6a8" +down_revision: Union[str, None] = "3e54e2fa2f7e" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.add_column("conversations", sa.Column("model", sa.String(), nullable=True)) + op.add_column("conversations", sa.Column("model_settings", sa.JSON(), nullable=True)) + + +def downgrade() -> None: + op.drop_column("conversations", "model_settings") + op.drop_column("conversations", "model") diff --git a/alembic/versions/b888f21b151f_add_vector_db_provider_to_source.py b/alembic/versions/b888f21b151f_add_vector_db_provider_to_source.py index 8b909295..ba91726f 100644 --- a/alembic/versions/b888f21b151f_add_vector_db_provider_to_source.py +++ b/alembic/versions/b888f21b151f_add_vector_db_provider_to_source.py @@ -23,7 +23,7 @@ depends_on: Union[str, Sequence[str], None] = None def upgrade() -> None: # determine backfill value based on current pinecone settings try: - from pinecone import IndexEmbed, PineconeAsyncio + from pinecone import IndexEmbed, PineconeAsyncio # noqa: F401 pinecone_available = True except ImportError: diff --git a/alembic/versions/d06594144ef3_add_and_migrate_encrypted_columns_for_.py b/alembic/versions/d06594144ef3_add_and_migrate_encrypted_columns_for_.py index 9fa5fec5..f2d54413 100644 --- a/alembic/versions/d06594144ef3_add_and_migrate_encrypted_columns_for_.py +++ b/alembic/versions/d06594144ef3_add_and_migrate_encrypted_columns_for_.py @@ -10,8 +10,6 @@ import json import os # Add the app directory to path to import our crypto utils -import sys -from pathlib import Path from typing import Sequence, Union import sqlalchemy as sa diff --git a/alembic/versions/d798609d65ff_add_index_on_messages_step_id.py b/alembic/versions/d798609d65ff_add_index_on_messages_step_id.py index a289a62a..091a9636 100644 --- a/alembic/versions/d798609d65ff_add_index_on_messages_step_id.py +++ 
b/alembic/versions/d798609d65ff_add_index_on_messages_step_id.py @@ -8,8 +8,6 @@ Create Date: 2025-11-07 15:43:59.446292 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op from letta.settings import settings diff --git a/alembic/versions/f9ad1c25fd2b_add_query_optimizing_runs_listing.py b/alembic/versions/f9ad1c25fd2b_add_query_optimizing_runs_listing.py index d4a265b6..61ba9ac1 100644 --- a/alembic/versions/f9ad1c25fd2b_add_query_optimizing_runs_listing.py +++ b/alembic/versions/f9ad1c25fd2b_add_query_optimizing_runs_listing.py @@ -8,8 +8,6 @@ Create Date: 2025-10-04 00:44:06.663817 from typing import Sequence, Union -import sqlalchemy as sa - from alembic import op # revision identifiers, used by Alembic. diff --git a/conf.yaml b/conf.yaml new file mode 100644 index 00000000..b3b2eb82 --- /dev/null +++ b/conf.yaml @@ -0,0 +1,412 @@ +# Letta Configuration File +# Place at ~/.letta/conf.yaml, ./conf.yaml, or set LETTA_CONFIG_PATH +# Environment variables take precedence over config file values +# +# Top-level keys and their env var mappings: +# letta: -> LETTA_* +# model: -> Provider-prefixed (OPENAI_*, ANTHROPIC_*, etc.) +# tool: -> Prefix-based (E2B_*, MCP_*, TOOL_*, etc.) 
+# datadog: -> DD_* + +letta: + # ============================================================================= + # Core Settings (LETTA_*) + # ============================================================================= + debug: false + # environment: "" + + # Default handles + # default_llm_handle: "" + # default_embedding_handle: "" + + # SSE Streaming + enable_keepalive: true + keepalive_interval: 50.0 + enable_cancellation_aware_streaming: true + + # ============================================================================= + # PostgreSQL (LETTA_PG_*) + # ============================================================================= + pg: + # db: "" + # user: "" + # password: "" + # host: "" + # port: "" + # uri: "" + pool_size: 25 + max_overflow: 10 + pool_timeout: 30 + pool_recycle: 1800 + echo: false + + # Connection pool settings (LETTA_POOL_*) + pool: + pre_ping: true + use_lifo: true + + # Database settings (LETTA_DB_*) + # db: + # max_concurrent_sessions: "" + + disable_sqlalchemy_pooling: true + enable_db_pool_monitoring: true + db_pool_monitoring_interval: 30 + + # ============================================================================= + # Redis (LETTA_REDIS_*) + # ============================================================================= + redis: + # host: "" + port: 6379 + + # ============================================================================= + # Multi-Agent (LETTA_MULTI_AGENT_*) + # ============================================================================= + multi_agent: + send_message_max_retries: 3 + send_message_timeout: 1200 + concurrent_sends: 50 + + # ============================================================================= + # OTEL / Observability (LETTA_OTEL_*, LETTA_CLICKHOUSE_*) + # ============================================================================= + otel: + # exporter_otlp_endpoint: "" + preferred_temporality: 1 + + clickhouse: + # endpoint: "" + database: otel + username: default + # password: "" 
+ + disable_tracing: false + llm_api_logging: true + track_last_agent_run: false + track_errored_messages: true + track_stop_reason: true + track_agent_run: true + track_provider_trace: true + + # ============================================================================= + # Uvicorn (LETTA_UVICORN_*) + # ============================================================================= + uvicorn: + workers: 1 + reload: false + timeout_keep_alive: 5 + + # Runtime settings + use_uvloop: false + use_granian: false + sqlalchemy_tracing: false + event_loop_threadpool_max_workers: 43 + + # ============================================================================= + # Experimental + # ============================================================================= + use_vertex_structured_outputs_experimental: false + use_asyncio_shield: true + + # ============================================================================= + # Lettuce (LETTA_USE_LETTUCE_*) + # ============================================================================= + use_lettuce_for_file_uploads: false + + # ============================================================================= + # Batch Job Polling (LETTA_POLL_*, LETTA_BATCH_*) + # ============================================================================= + enable_batch_job_polling: false + poll_running_llm_batches_interval_seconds: 300 + poll_lock_retry_interval_seconds: 480 + batch_job_polling_lookback_weeks: 2 + # batch_job_polling_batch_size: "" + + # ============================================================================= + # LLM Timeouts (LETTA_LLM_*) + # ============================================================================= + llm: + request_timeout_seconds: 60.0 + stream_timeout_seconds: 600.0 + + # ============================================================================= + # Pinecone (LETTA_PINECONE_*, LETTA_ENABLE_PINECONE, LETTA_UPSERT_PINECONE_INDICES) + # 
============================================================================= + enable_pinecone: false + upsert_pinecone_indices: false + pinecone: + # api_key: "" + source_index: sources + agent_index: recall + + # ============================================================================= + # Turbopuffer (LETTA_TPUF_*, LETTA_USE_TPUF, LETTA_EMBED_*) + # ============================================================================= + use_tpuf: false + embed_all_messages: false + embed_tools: false + tpuf: + # api_key: "" + region: gcp-us-central1 + + # ============================================================================= + # File Processing (LETTA_FILE_PROCESSING_*) + # ============================================================================= + file_processing: + timeout_minutes: 30 + timeout_error_message: "File processing timed out after {} minutes. Please try again." + + # ============================================================================= + # Letta Client (LETTA_DEFAULT_*) + # ============================================================================= + default_base_url: http://localhost:8283 + # default_token: "" + + # ============================================================================= + # Agent Architecture + # ============================================================================= + use_letta_v1_agent: false + archival_memory_token_limit: 8192 + + # ============================================================================= + # Security + # ============================================================================= + no_default_actor: false + # encryption_key: "" + + # ============================================================================= + # OCR + # ============================================================================= + # mistral_api_key: "" + + # ============================================================================= + # Summarizer (LETTA_SUMMARIZER_*) + # 
============================================================================= + summarizer: + mode: partial_evict_message_buffer_mode + message_buffer_limit: 60 + message_buffer_min: 15 + enable_summarization: true + max_summarization_retries: 3 + partial_evict_summarizer_percentage: 0.30 + evict_all_messages: false + max_summarizer_retries: 3 + memory_warning_threshold: 0.75 + send_memory_warning_message: false + desired_memory_token_pressure: 0.3 + keep_last_n_messages: 0 + + # ============================================================================= + # Logging (LETTA_LOGGING_*) + # ============================================================================= + logging: + debug: false + json_logging: false + log_level: WARNING + verbose_telemetry_logging: false + + # ============================================================================= + # Telemetry (LETTA_TELEMETRY_*) + # ============================================================================= + telemetry: + enable_datadog: false + provider_trace_backend: postgres + socket_path: /var/run/telemetry/telemetry.sock + provider_trace_pg_metadata_only: false + # source: "" + + # Datadog settings (LETTA_TELEMETRY_DATADOG_*) + datadog: + agent_host: localhost + agent_port: 8126 + service_name: letta-server + profiling_enabled: false + profiling_memory_enabled: false + profiling_heap_enabled: false + # git_repository_url: "" + # git_commit_sha: "" + main_package: letta + +# ============================================================================= +# Model Settings (-> OPENAI_*, ANTHROPIC_*, AWS_*, etc.) 
+# ============================================================================= +model: + # Global settings + global_max_context_window_limit: 32000 + inner_thoughts_kwarg: thinking + default_prompt_formatter: chatml + + # OpenAI (-> OPENAI_*) + openai: + # api_key: "" + api_base: https://api.openai.com/v1 + + # Anthropic (-> ANTHROPIC_*) + anthropic: + # api_key: "" + max_retries: 3 + sonnet_1m: false + + # Azure OpenAI (-> AZURE_*) + azure: + # api_key: "" + # base_url: "" + api_version: "2024-09-01-preview" + + # Google Gemini (-> GEMINI_*) + gemini: + # api_key: "" + base_url: https://generativelanguage.googleapis.com/ + force_minimum_thinking_budget: false + max_retries: 5 + + # Google Vertex (-> GOOGLE_CLOUD_*) + # google_cloud: + # project: "" + # location: "" + + # AWS Bedrock (-> AWS_*, BEDROCK_*) + aws: + # access_key_id: "" + # secret_access_key: "" + default_region: us-east-1 + + bedrock: + anthropic_version: bedrock-2023-05-31 + + # OpenRouter (-> OPENROUTER_*) + # openrouter: + # api_key: "" + # referer: "" + # title: "" + # handle_base: "" + + # Groq (-> GROQ_*) + # groq: + # api_key: "" + + # Together (-> TOGETHER_*) + # together: + # api_key: "" + + # DeepSeek (-> DEEPSEEK_*) + # deepseek: + # api_key: "" + + # xAI/Grok (-> XAI_*) + # xai: + # api_key: "" + + # Z.ai/ZhipuAI (-> ZAI_*) + zai: + # api_key: "" + base_url: https://api.z.ai/api/paas/v4/ + + # MiniMax (-> MINIMAX_*) + # minimax: + # api_key: "" + + # Ollama (-> OLLAMA_*) + # ollama: + # base_url: "" + + # vLLM (-> VLLM_*) + # vllm: + # api_base: "" + # handle_base: "" + + # SGLang (-> SGLANG_*) + # sglang: + # api_base: "" + # handle_base: "" + + # LM Studio (-> LMSTUDIO_*) + # lmstudio: + # base_url: "" + + # OpenLLM (-> OPENLLM_*) + # openllm: + # auth_type: "" + # api_key: "" + +# ============================================================================= +# Tool Settings (-> E2B_*, MCP_*, MODAL_*, TOOL_*, etc.) 
+# ============================================================================= +tool: + # E2B Sandbox (-> E2B_*) + # e2b: + # api_key: "" + # sandbox_template_id: "" + + # Modal Sandbox (-> MODAL_*) + # modal: + # token_id: "" + # token_secret: "" + + # Search Providers (-> TAVILY_*, EXA_*) + # tavily: + # api_key: "" + + # exa: + # api_key: "" + + # Local Sandbox (-> TOOL_*) + tool: + # exec_dir: "" + sandbox_timeout: 180 + # exec_venv_name: "" + exec_autoreload_venv: true + + # MCP (-> MCP_*) + mcp: + connect_to_server_timeout: 30.0 + list_tools_timeout: 30.0 + execute_tool_timeout: 60.0 + read_from_config: false + disable_stdio: true + +# ============================================================================= +# Datadog Agent Settings (-> DD_*) +# ============================================================================= +# datadog: +# site: "" +# service: "" +# version: "" +# +# trace: +# enabled: false +# agent_url: "" +# health_metrics_enabled: false +# +# dogstatsd: +# url: "" +# +# logs: +# injection: false +# +# runtime: +# metrics_enabled: false +# +# appsec: +# enabled: false +# sca_enabled: false +# +# iast: +# enabled: false +# +# exception: +# replay_enabled: false +# +# llmobs: +# enabled: false +# ml_app: "" +# +# instrumentation: +# install_type: "" +# +# git: +# repository_url: "" +# commit_sha: "" +# +# main_package: "" diff --git a/dev-compose.yaml b/dev-compose.yaml index c1127aa0..f9fd8e77 100644 --- a/dev-compose.yaml +++ b/dev-compose.yaml @@ -1,6 +1,6 @@ services: letta_db: - image: ankane/pgvector:v0.5.1 + image: pgvector/pgvector:0.8.1-pg15 networks: default: aliases: diff --git a/fern/openapi.json b/fern/openapi.json index 42154985..e51cc200 100644 --- a/fern/openapi.json +++ b/fern/openapi.json @@ -4694,6 +4694,18 @@ "title": "Conversation Id" }, "description": "Conversation ID to export. If provided, uses messages from this conversation instead of the agent's global message history." 
+ }, + { + "name": "scrub_messages", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "description": "If True, excludes all messages from the export. Useful for sharing agent configs without conversation history.", + "default": false, + "title": "Scrub Messages" + }, + "description": "If True, excludes all messages from the export. Useful for sharing agent configs without conversation history." } ], "requestBody": { @@ -4727,6 +4739,62 @@ } } } + }, + "post": { + "tags": ["agents"], + "summary": "Export Agent With Skills", + "description": "Export the serialized JSON representation of an agent with optional skills.\n\nThis POST endpoint allows including skills in the export by providing them in the request body.\nSkills are resolved client-side and passed as SkillSchema objects containing the skill files.", + "operationId": "export_agent_with_skills", + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "$ref": "#/components/schemas/ExportAgentRequest" + }, + { + "type": "null" + } + ], + "title": "Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "string" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } } }, "/v1/agents/import": { @@ -6661,6 +6729,151 @@ } } }, + "/v1/agents/{agent_id}/recompile": { + "post": { + "tags": ["agents"], + "summary": "Recompile Agent", + "description": "Manually trigger system prompt recompilation for an agent.", + "operationId": "recompile_agent", + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "minLength": 42, + 
"maxLength": 42, + "pattern": "^agent-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the agent in the format 'agent-'", + "examples": ["agent-123e4567-e89b-42d3-8456-426614174000"], + "title": "Agent Id" + }, + "description": "The ID of the agent in the format 'agent-'" + }, + { + "name": "update_timestamp", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "description": "If True, update the in-context memory last edit timestamp embedded in the system prompt.", + "default": false, + "title": "Update Timestamp" + }, + "description": "If True, update the in-context memory last edit timestamp embedded in the system prompt." + }, + { + "name": "dry_run", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "description": "If True, do not persist changes; still returns the compiled system prompt.", + "default": false, + "title": "Dry Run" + }, + "description": "If True, do not persist changes; still returns the compiled system prompt." 
+ } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "string", + "title": "Response Recompile Agent" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/agents/{agent_id}/system-prompt/recompile": { + "post": { + "tags": ["agents"], + "summary": "Recompile Agent System Prompt", + "description": "Deprecated alias for POST /v1/agents/{agent_id}/recompile.", + "operationId": "recompile_agent_system_prompt", + "deprecated": true, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "minLength": 42, + "maxLength": 42, + "pattern": "^agent-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the agent in the format 'agent-'", + "examples": ["agent-123e4567-e89b-42d3-8456-426614174000"], + "title": "Agent Id" + }, + "description": "The ID of the agent in the format 'agent-'" + }, + { + "name": "update_timestamp", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "description": "If True, update the in-context memory last edit timestamp embedded in the system prompt.", + "default": false, + "title": "Update Timestamp" + }, + "description": "If True, update the in-context memory last edit timestamp embedded in the system prompt." + }, + { + "name": "dry_run", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "description": "If True, do not persist changes; still returns the compiled system prompt.", + "default": false, + "title": "Dry Run" + }, + "description": "If True, do not persist changes; still returns the compiled system prompt." 
+ } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "string", + "title": "Response Recompile Agent System Prompt" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, "/v1/agents/{agent_id}/core-memory/blocks/attach/{block_id}": { "patch": { "tags": ["agents"], @@ -7635,7 +7848,7 @@ "post": { "tags": ["agents"], "summary": "Send Message", - "description": "Process a user message and return the agent's response.\nThis endpoint accepts a message from a user and processes it through the agent.\n\nThe response format is controlled by the `streaming` field in the request body:\n- If `streaming=false` (default): Returns a complete LettaResponse with all messages\n- If `streaming=true`: Returns a Server-Sent Events (SSE) stream\n\nAdditional streaming options (only used when streaming=true):\n- `stream_tokens`: Stream individual tokens instead of complete steps\n- `include_pings`: Include keepalive pings to prevent connection timeouts\n- `background`: Process the request in the background", + "description": "Process a user message and return the agent's response.\nThis endpoint accepts a message from a user and processes it through the agent.\n\n**Note:** Sending multiple concurrent requests to the same agent can lead to undefined behavior.\nEach agent processes messages sequentially, and concurrent requests may interleave in unexpected ways.\nWait for each request to complete before sending the next one. 
Use separate agents or conversations for parallel processing.\n\nThe response format is controlled by the `streaming` field in the request body:\n- If `streaming=false` (default): Returns a complete LettaResponse with all messages\n- If `streaming=true`: Returns a Server-Sent Events (SSE) stream\n\nAdditional streaming options (only used when streaming=true):\n- `stream_tokens`: Stream individual tokens instead of complete steps\n- `include_pings`: Include keepalive pings to prevent connection timeouts\n- `background`: Process the request in the background", "operationId": "send_message", "parameters": [ { @@ -7806,8 +8019,8 @@ "assistant_message": "#/components/schemas/AssistantMessage", "approval_request_message": "#/components/schemas/ApprovalRequestMessage", "approval_response_message": "#/components/schemas/ApprovalResponseMessage", - "summary": "#/components/schemas/SummaryMessage", - "event": "#/components/schemas/EventMessage" + "summary_message": "#/components/schemas/SummaryMessage", + "event_message": "#/components/schemas/EventMessage" } }, "title": "Response Modify Message" @@ -7832,7 +8045,7 @@ "post": { "tags": ["agents"], "summary": "Send Message Streaming", - "description": "Process a user message and return the agent's response.\n\nDeprecated: Use the `POST /{agent_id}/messages` endpoint with `streaming=true` in the request body instead.\n\nThis endpoint accepts a message from a user and processes it through the agent.\nIt will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True.", + "description": "Process a user message and return the agent's response.\n\nDeprecated: Use the `POST /{agent_id}/messages` endpoint with `streaming=true` in the request body instead.\n\n**Note:** Sending multiple concurrent requests to the same agent can lead to undefined behavior.\nEach agent processes messages sequentially, and concurrent requests may interleave in unexpected ways.\nWait for each request to complete 
before sending the next one. Use separate agents or conversations for parallel processing.\n\nThis endpoint accepts a message from a user and processes it through the agent.\nIt will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True.", "operationId": "create_agent_message_stream", "deprecated": true, "parameters": [ @@ -7947,6 +8160,62 @@ } } }, + "/v1/agents/{agent_id}/generate": { + "post": { + "tags": ["agents"], + "summary": "Generate Completion", + "description": "Generate a completion directly from the LLM provider using the agent's configuration.\n\nThis endpoint makes a direct request to the LLM provider without any agent processing:\n- No memory or context retrieval\n- No tool calling\n- No message persistence\n- No agent state modification\n\nSimply provide a prompt, and the endpoint formats it as a user message.\nOptionally include a system_prompt for context/instructions.\n\nThe agent's LLM configuration (model, credentials, settings) is used by default.\nUse override_model to switch to a different model/provider while still using\nthe organization's configured providers.\n\nExample use cases:\n- Quick LLM queries without agent overhead\n- Testing different models with the same prompt\n- Simple chat completions using agent's credentials\n- Comparing model outputs on identical prompts", + "operationId": "generate_completion", + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "minLength": 42, + "maxLength": 42, + "pattern": "^agent-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the agent in the format 'agent-'", + "examples": ["agent-123e4567-e89b-42d3-8456-426614174000"], + "title": "Agent Id" + }, + "description": "The ID of the agent in the format 'agent-'" + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/GenerateRequest" + } + } + } + }, + "responses": { + "200": { + "description": "Successful generation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GenerateResponse" + } + } + } + }, + "404": { + "description": "Agent not found" + }, + "422": { + "description": "Invalid request parameters" + }, + "502": { + "description": "LLM provider error" + } + } + } + }, "/v1/agents/messages/search": { "post": { "tags": ["agents"], @@ -7996,7 +8265,7 @@ "post": { "tags": ["agents"], "summary": "Send Message Async", - "description": "Asynchronously process a user message and return a run object.\nThe actual processing happens in the background, and the status can be checked using the run ID.\n\nThis is \"asynchronous\" in the sense that it's a background run and explicitly must be fetched by the run ID.", + "description": "Asynchronously process a user message and return a run object.\nThe actual processing happens in the background, and the status can be checked using the run ID.\n\nThis is \"asynchronous\" in the sense that it's a background run and explicitly must be fetched by the run ID.\n\n**Note:** Sending multiple concurrent requests to the same agent can lead to undefined behavior.\nEach agent processes messages sequentially, and concurrent requests may interleave in unexpected ways.\nWait for each request to complete before sending the next one. 
Use separate agents or conversations for parallel processing.", "operationId": "create_agent_message_async", "parameters": [ { @@ -8449,19 +8718,26 @@ "get": { "tags": ["conversations"], "summary": "List Conversations", - "description": "List all conversations for an agent.", + "description": "List all conversations for an agent (or all conversations if agent_id not provided).", "operationId": "list_conversations", "parameters": [ { "name": "agent_id", "in": "query", - "required": true, + "required": false, "schema": { - "type": "string", - "description": "The agent ID to list conversations for", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The agent ID to list conversations for (optional - returns all conversations if not provided)", "title": "Agent Id" }, - "description": "The agent ID to list conversations for" + "description": "The agent ID to list conversations for (optional - returns all conversations if not provided)" }, { "name": "limit", @@ -8510,6 +8786,32 @@ "title": "Summary Search" }, "description": "Search for text within conversation summaries" + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "enum": ["asc", "desc"], + "type": "string", + "description": "Sort order for conversations. 'asc' for oldest first, 'desc' for newest first", + "default": "desc", + "title": "Order" + }, + "description": "Sort order for conversations. 
'asc' for oldest first, 'desc' for newest first" + }, + { + "name": "order_by", + "in": "query", + "required": false, + "schema": { + "enum": ["created_at", "last_run_completion"], + "type": "string", + "description": "Field to sort by", + "default": "created_at", + "title": "Order By" + }, + "description": "Field to sort by" } ], "responses": { @@ -8646,6 +8948,52 @@ } } } + }, + "delete": { + "tags": ["conversations"], + "summary": "Delete Conversation", + "description": "Delete a conversation (soft delete).\n\nThis marks the conversation as deleted but does not permanently remove it from the database.\nThe conversation will no longer appear in list operations.\nAny isolated blocks associated with the conversation will be permanently deleted.", + "operationId": "delete_conversation", + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "minLength": 1, + "maxLength": 41, + "pattern": "^(default|conv-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12})$", + "description": "The conversation identifier. Either the special value 'default' or an ID in the format 'conv-'", + "examples": [ + "default", + "conv-123e4567-e89b-42d3-8456-426614174000" + ], + "title": "Conversation Id" + }, + "description": "The conversation identifier. 
Either the special value 'default' or an ID in the format 'conv-'" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } } }, "/v1/conversations/{conversation_id}/messages": { @@ -9559,12 +9907,12 @@ } } }, - "/v1/groups/{group_id}/messages": { - "post": { + "/v1/groups/{group_id}/messages/{message_id}": { + "patch": { "tags": ["groups"], - "summary": "Send Group Message", - "description": "Process a user message and return the group's response.\nThis endpoint accepts a message from a user and processes it through through agents in the group based on the specified pattern", - "operationId": "send_group_message", + "summary": "Modify Group Message", + "description": "Update the details of a message associated with an agent.", + "operationId": "modify_group_message", "deprecated": true, "parameters": [ { @@ -9581,6 +9929,21 @@ "title": "Group Id" }, "description": "The ID of the group in the format 'group-'" + }, + { + "name": "message_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "minLength": 44, + "maxLength": 44, + "pattern": "^message-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the message in the format 'message-'", + "examples": ["message-123e4567-e89b-42d3-8456-426614174000"], + "title": "Message Id" + }, + "description": "The ID of the message in the format 'message-'" } ], "requestBody": { @@ -9588,7 +9951,21 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/LettaRequest" + "anyOf": [ + { + "$ref": "#/components/schemas/UpdateSystemMessage" + }, + { + "$ref": "#/components/schemas/UpdateUserMessage" + }, + { + "$ref": "#/components/schemas/UpdateReasoningMessage" + }, + { + "$ref": 
"#/components/schemas/UpdateAssistantMessage" + } + ], + "title": "Request" } } } @@ -9599,7 +9976,58 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/LettaResponse" + "oneOf": [ + { + "$ref": "#/components/schemas/SystemMessage" + }, + { + "$ref": "#/components/schemas/UserMessage" + }, + { + "$ref": "#/components/schemas/ReasoningMessage" + }, + { + "$ref": "#/components/schemas/HiddenReasoningMessage" + }, + { + "$ref": "#/components/schemas/ToolCallMessage" + }, + { + "$ref": "#/components/schemas/ToolReturnMessage" + }, + { + "$ref": "#/components/schemas/AssistantMessage" + }, + { + "$ref": "#/components/schemas/ApprovalRequestMessage" + }, + { + "$ref": "#/components/schemas/ApprovalResponseMessage" + }, + { + "$ref": "#/components/schemas/SummaryMessage" + }, + { + "$ref": "#/components/schemas/EventMessage" + } + ], + "discriminator": { + "propertyName": "message_type", + "mapping": { + "system_message": "#/components/schemas/SystemMessage", + "user_message": "#/components/schemas/UserMessage", + "reasoning_message": "#/components/schemas/ReasoningMessage", + "hidden_reasoning_message": "#/components/schemas/HiddenReasoningMessage", + "tool_call_message": "#/components/schemas/ToolCallMessage", + "tool_return_message": "#/components/schemas/ToolReturnMessage", + "assistant_message": "#/components/schemas/AssistantMessage", + "approval_request_message": "#/components/schemas/ApprovalRequestMessage", + "approval_response_message": "#/components/schemas/ApprovalResponseMessage", + "summary_message": "#/components/schemas/SummaryMessage", + "event_message": "#/components/schemas/EventMessage" + } + }, + "title": "Response Modify Group Message" } } } @@ -9615,7 +10043,9 @@ } } } - }, + } + }, + "/v1/groups/{group_id}/messages": { "get": { "tags": ["groups"], "summary": "List Group Messages", @@ -9790,203 +10220,6 @@ } } }, - "/v1/groups/{group_id}/messages/stream": { - "post": { - "tags": ["groups"], - "summary": "Send Group 
Message Streaming", - "description": "Process a user message and return the group's responses.\nThis endpoint accepts a message from a user and processes it through agents in the group based on the specified pattern.\nIt will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True.", - "operationId": "send_group_message_streaming", - "deprecated": true, - "parameters": [ - { - "name": "group_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "minLength": 42, - "maxLength": 42, - "pattern": "^group-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", - "description": "The ID of the group in the format 'group-'", - "examples": ["group-123e4567-e89b-42d3-8456-426614174000"], - "title": "Group Id" - }, - "description": "The ID of the group in the format 'group-'" - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LettaStreamingRequest" - } - } - } - }, - "responses": { - "200": { - "description": "Successful response", - "content": { - "application/json": { - "schema": {} - }, - "text/event-stream": { - "description": "Server-Sent Events stream" - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/groups/{group_id}/messages/{message_id}": { - "patch": { - "tags": ["groups"], - "summary": "Modify Group Message", - "description": "Update the details of a message associated with an agent.", - "operationId": "modify_group_message", - "deprecated": true, - "parameters": [ - { - "name": "group_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "minLength": 42, - "maxLength": 42, - "pattern": "^group-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", - "description": "The ID of the group in the format 'group-'", - 
"examples": ["group-123e4567-e89b-42d3-8456-426614174000"], - "title": "Group Id" - }, - "description": "The ID of the group in the format 'group-'" - }, - { - "name": "message_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "minLength": 44, - "maxLength": 44, - "pattern": "^message-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", - "description": "The ID of the message in the format 'message-'", - "examples": ["message-123e4567-e89b-42d3-8456-426614174000"], - "title": "Message Id" - }, - "description": "The ID of the message in the format 'message-'" - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "anyOf": [ - { - "$ref": "#/components/schemas/UpdateSystemMessage" - }, - { - "$ref": "#/components/schemas/UpdateUserMessage" - }, - { - "$ref": "#/components/schemas/UpdateReasoningMessage" - }, - { - "$ref": "#/components/schemas/UpdateAssistantMessage" - } - ], - "title": "Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "oneOf": [ - { - "$ref": "#/components/schemas/SystemMessage" - }, - { - "$ref": "#/components/schemas/UserMessage" - }, - { - "$ref": "#/components/schemas/ReasoningMessage" - }, - { - "$ref": "#/components/schemas/HiddenReasoningMessage" - }, - { - "$ref": "#/components/schemas/ToolCallMessage" - }, - { - "$ref": "#/components/schemas/ToolReturnMessage" - }, - { - "$ref": "#/components/schemas/AssistantMessage" - }, - { - "$ref": "#/components/schemas/ApprovalRequestMessage" - }, - { - "$ref": "#/components/schemas/ApprovalResponseMessage" - }, - { - "$ref": "#/components/schemas/SummaryMessage" - }, - { - "$ref": "#/components/schemas/EventMessage" - } - ], - "discriminator": { - "propertyName": "message_type", - "mapping": { - "system_message": "#/components/schemas/SystemMessage", - "user_message": "#/components/schemas/UserMessage", - 
"reasoning_message": "#/components/schemas/ReasoningMessage", - "hidden_reasoning_message": "#/components/schemas/HiddenReasoningMessage", - "tool_call_message": "#/components/schemas/ToolCallMessage", - "tool_return_message": "#/components/schemas/ToolReturnMessage", - "assistant_message": "#/components/schemas/AssistantMessage", - "approval_request_message": "#/components/schemas/ApprovalRequestMessage", - "approval_response_message": "#/components/schemas/ApprovalResponseMessage", - "summary": "#/components/schemas/SummaryMessage", - "event": "#/components/schemas/EventMessage" - } - }, - "title": "Response Modify Group Message" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, "/v1/groups/{group_id}/reset-messages": { "patch": { "tags": ["groups"], @@ -16537,8 +16770,8 @@ "assistant_message": "#/components/schemas/AssistantMessage", "approval_request_message": "#/components/schemas/ApprovalRequestMessage", "approval_response_message": "#/components/schemas/ApprovalResponseMessage", - "summary": "#/components/schemas/SummaryMessage", - "event": "#/components/schemas/EventMessage" + "summary_message": "#/components/schemas/SummaryMessage", + "event_message": "#/components/schemas/EventMessage" } } }, @@ -17678,7 +17911,7 @@ }, "sortBy": { "type": "string", - "enum": ["created_at", "last_run_completion"] + "enum": ["created_at", "last_run_completion", "updated_at"] }, "ascending": { "type": "boolean" @@ -17715,6 +17948,161 @@ } } }, + "/v1/agents/search/count": { + "get": { + "description": "Count deployed agents matching search criteria", + "summary": "Count Deployed Agents", + "tags": ["agents"], + "parameters": [ + { + "name": "search", + "in": "query", + "schema": { + "type": "array", + "items": { + "oneOf": [ + { + "type": "object", + "properties": { + "field": { + "type": "string", + "enum": ["version"] + }, + 
"value": { + "type": "string" + } + }, + "required": ["field", "value"] + }, + { + "type": "object", + "properties": { + "field": { + "type": "string", + "enum": ["name"] + }, + "operator": { + "type": "string", + "enum": ["eq", "contains"] + }, + "value": { + "type": "string" + } + }, + "required": ["field", "operator", "value"] + }, + { + "type": "object", + "properties": { + "field": { + "type": "string", + "enum": ["tags"] + }, + "operator": { + "type": "string", + "enum": ["contains"] + }, + "value": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["field", "operator", "value"] + }, + { + "type": "object", + "properties": { + "field": { + "type": "string", + "enum": ["identity"] + }, + "operator": { + "type": "string", + "enum": ["eq"] + }, + "value": { + "type": "string" + } + }, + "required": ["field", "operator", "value"] + }, + { + "type": "object", + "properties": { + "field": { + "type": "string", + "enum": ["templateName"] + }, + "operator": { + "type": "string", + "enum": ["eq"] + }, + "value": { + "type": "string" + } + }, + "required": ["field", "operator", "value"] + }, + { + "type": "object", + "properties": { + "field": { + "type": "string", + "enum": ["agentId"] + }, + "operator": { + "type": "string", + "enum": ["eq"] + }, + "value": { + "type": "string" + } + }, + "required": ["field", "operator", "value"] + } + ] + } + } + }, + { + "name": "project_id", + "in": "query", + "schema": { + "type": "string" + } + }, + { + "name": "combinator", + "in": "query", + "schema": { + "type": "string", + "enum": ["AND"] + } + } + ], + "operationId": "agents.countDeployedAgents", + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "count": { + "type": "number" + } + }, + "required": ["count"] + } + } + } + } + } + } + }, "/v1/agents/{agent_id}/core-memory/variables": { "get": { "description": "Get the variables associated with an 
agent", @@ -18363,6 +18751,129 @@ } } }, + "/v1/templates/{template_name}/save": { + "post": { + "description": "Saves the current version of the template as a new version", + "summary": "Save template version (Cloud-only)", + "tags": ["templates"], + "parameters": [ + { + "name": "template_name", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The template version, formatted as {template-name}, any version appended will be ignored" + } + ], + "operationId": "templates.saveTemplateVersionNoProject", + "requestBody": { + "description": "Body", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "preserve_environment_variables_on_migration": { + "type": "boolean", + "description": "If true, the environment variables will be preserved in the template version when migrating agents" + }, + "preserve_core_memories_on_migration": { + "type": "boolean", + "description": "If true, the core memories will be preserved in the template version when migrating agents" + }, + "preserve_sources_on_migration": { + "type": "boolean", + "description": "If true, existing agent folders/sources will be preserved and merged with template sources during migration. If false, agent sources will be replaced with template sources." + }, + "block_reconciliation_strategy": { + "type": "string", + "enum": ["reconcile-all", "preserve-deleted"], + "description": "Strategy for reconciling memory blocks during migration: \"reconcile-all\" deletes blocks not in the template, \"preserve-deleted\" keeps them. Defaults to \"preserve-deleted\"." 
+ }, + "migrate_agents": { + "type": "boolean", + "description": "If true, existing agents attached to this template will be migrated to the new template version" + }, + "message": { + "type": "string", + "description": "A message to describe the changes made in this template version" + } + } + } + } + } + }, + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The exact name of the template" + }, + "id": { + "type": "string" + }, + "project_id": { + "type": "string" + }, + "project_slug": { + "type": "string" + }, + "latest_version": { + "type": "string", + "description": "The latest version of the template" + }, + "description": { + "type": "string" + }, + "template_deployment_slug": { + "type": "string", + "description": "The full name of the template, including version and project slug" + }, + "updated_at": { + "type": "string", + "description": "When the template was last updated" + } + }, + "required": [ + "name", + "id", + "project_id", + "project_slug", + "latest_version", + "template_deployment_slug", + "updated_at" + ] + } + } + } + }, + "400": { + "description": "400", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + } + } + } + }, "/v1/templates/{project_id}/{template_name}": { "post": { "description": "Saves the current version of the template as a new version", @@ -20146,6 +20657,225 @@ } } }, + "/v1/templates/{template_name}/rollback": { + "post": { + "description": "Rollback the current working version of a template to a previous saved version. 
If the current version has unsaved changes, they will be automatically saved as a new version before rollback.", + "summary": "Rollback template to previous version (Cloud-only)", + "tags": ["templates"], + "parameters": [ + { + "name": "template_name", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The template name (without version)" + } + ], + "operationId": "templates.rollbackTemplateNoProject", + "requestBody": { + "description": "Body", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "version": { + "type": "string", + "description": "The target version to rollback to (e.g., \"1\", \"2\", \"latest\"). Cannot be \"current\" or \"dev\"." + } + }, + "required": ["version"] + } + } + } + }, + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean" + }, + "message": { + "type": "string" + } + }, + "required": ["success"] + } + } + } + }, + "400": { + "description": "400", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + }, + "404": { + "description": "404", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + }, + "500": { + "description": "500", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + } + } + } + }, + "/v1/templates/{project_id}/{template_name}/rollback": { + "post": { + "description": "Rollback the current working version of a template to a previous saved version. 
If the current version has unsaved changes, they will be automatically saved as a new version before rollback.", + "summary": "Rollback template to previous version (Cloud-only)", + "tags": ["templates"], + "parameters": [ + { + "name": "project_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The project id" + }, + { + "name": "template_name", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The template name (without version)" + } + ], + "operationId": "templates.rollbackTemplate", + "requestBody": { + "description": "Body", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "version": { + "type": "string", + "description": "The target version to rollback to (e.g., \"1\", \"2\", \"latest\"). Cannot be \"current\" or \"dev\"." + } + }, + "required": ["version"] + } + } + } + }, + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean" + }, + "message": { + "type": "string" + } + }, + "required": ["success"] + } + } + } + }, + "400": { + "description": "400", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + }, + "404": { + "description": "404", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + }, + "500": { + "description": "500", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + } + } + } + }, "/v1/templates/{project_id}/{template_name}/agent-file": { "put": { "description": "Updates the current working version of a template from an agent file", @@ 
-20941,6 +21671,64 @@ }, "settings": { "type": "string" + }, + "local_time": { + "type": "string" + }, + "device_type": { + "type": "string" + }, + "cwd": { + "type": "string" + }, + "total_api_ms": { + "type": "number" + }, + "total_wall_ms": { + "type": "number" + }, + "step_count": { + "type": "number" + }, + "prompt_tokens": { + "type": "number" + }, + "completion_tokens": { + "type": "number" + }, + "total_tokens": { + "type": "number" + }, + "cached_input_tokens": { + "type": "number" + }, + "cache_write_tokens": { + "type": "number" + }, + "reasoning_tokens": { + "type": "number" + }, + "context_tokens": { + "type": "number" + }, + "agent_name": { + "type": "string" + }, + "agent_description": { + "type": "string" + }, + "model": { + "type": "string" + }, + "billing_tier": { + "type": "string" + }, + "recent_chunks": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": {} + } } }, "required": ["message"] @@ -23426,6 +24214,322 @@ } } }, + "/v1/agents/{agent_id}/memory-files/directory": { + "get": { + "description": "List immediate children of a directory in the agent memory repo (single level).", + "summary": "List Directory", + "tags": ["memoryFiles"], + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "path", + "in": "query", + "description": "Directory path to list. 
Empty for root.", + "schema": { + "type": "string" + } + }, + { + "name": "depth", + "in": "query", + "description": "Depth of directory listing (default: 1).", + "schema": { + "type": "number", + "nullable": true + } + }, + { + "name": "ref", + "in": "query", + "description": "Git ref (default: HEAD).", + "schema": { + "type": "string" + } + } + ], + "operationId": "memoryFiles.listDirectory", + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "entries": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": ["file", "directory"] + } + }, + "required": ["name", "type"] + } + }, + "depth": { + "type": "number" + } + }, + "required": ["path", "entries", "depth"] + } + } + } + }, + "404": { + "description": "404", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + }, + "501": { + "description": "501", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + } + } + } + }, + "/v1/agents/{agent_id}/memory-files/history": { + "get": { + "description": "Get commit history for a specific file in the agent memory repo.", + "summary": "Get File History", + "tags": ["memoryFiles"], + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "file_path", + "in": "query", + "description": "Path to the file (e.g. 
\"blocks/persona.md\").", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "limit", + "in": "query", + "description": "Max commits to return (default: 50).", + "schema": { + "type": "number", + "nullable": true + } + } + ], + "operationId": "memoryFiles.getFileHistory", + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "commits": { + "type": "array", + "items": { + "type": "object", + "properties": { + "sha": { + "type": "string" + }, + "message": { + "type": "string" + }, + "timestamp": { + "type": "string" + }, + "author_name": { + "type": "string", + "nullable": true + } + }, + "required": [ + "sha", + "message", + "timestamp", + "author_name" + ] + } + } + }, + "required": ["path", "commits"] + } + } + } + }, + "404": { + "description": "404", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + }, + "501": { + "description": "501", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + } + } + } + }, + "/v1/agents/{agent_id}/memory-files/content": { + "get": { + "description": "Read a single file content at a specific git ref from the agent memory repo.", + "summary": "Read File Content", + "tags": ["memoryFiles"], + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "file_path", + "in": "query", + "description": "Path to the file (e.g. 
\"blocks/persona.md\").", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "ref", + "in": "query", + "description": "Git ref (default: HEAD).", + "schema": { + "type": "string" + } + } + ], + "operationId": "memoryFiles.readFileContent", + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "content": { + "type": "string" + }, + "ref": { + "type": "string" + } + }, + "required": ["path", "content", "ref"] + } + } + } + }, + "404": { + "description": "404", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + }, + "501": { + "description": "501", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": ["message"] + } + } + } + } + } + } + }, "/v1/pipelines": { "post": { "description": "Create a new pipeline (producer + feed + optionally subscribers)", @@ -25131,6 +26235,507 @@ } } } + }, + "/v1/listeners/register": { + "post": { + "description": "Register a new listener connection and get connectionId for WebSocket", + "summary": "Register Listener", + "tags": ["listeners"], + "parameters": [], + "operationId": "listeners.register", + "requestBody": { + "description": "Body", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "deviceId": { + "type": "string" + }, + "connectionName": { + "type": "string" + }, + "agentId": { + "type": "string" + } + }, + "required": ["deviceId", "connectionName"] + } + } + } + }, + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "connectionId": { + "type": "string" + }, + "wsUrl": { + "type": "string" + } + }, + "required": ["connectionId", 
"wsUrl"] + } + } + } + }, + "400": { + "description": "400", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "errorCode": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": ["errorCode", "message"] + } + } + } + } + } + } + }, + "/v1/listeners": { + "get": { + "description": "List all active listener connections for the organization", + "summary": "List Listener Connections", + "tags": ["listeners"], + "parameters": [ + { + "name": "limit", + "in": "query", + "schema": { + "type": "string" + } + }, + { + "name": "after", + "in": "query", + "schema": { + "type": "string" + } + }, + { + "name": "userId", + "in": "query", + "schema": { + "type": "string" + } + }, + { + "name": "agentId", + "in": "query", + "schema": { + "type": "string" + } + } + ], + "operationId": "listeners.listConnections", + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "connections": { + "type": "array", + "items": { + "type": "object", + "properties": { + "connectionId": { + "type": "string" + }, + "deviceId": { + "type": "string" + }, + "connectionName": { + "type": "string" + }, + "organizationId": { + "type": "string" + }, + "userId": { + "type": "string" + }, + "apiKeyOwner": { + "type": "string" + }, + "agentId": { + "type": "string" + }, + "podId": { + "type": "string" + }, + "connectedAt": { + "type": "number" + }, + "lastHeartbeat": { + "type": "number" + }, + "currentMode": { + "type": "string", + "enum": [ + "default", + "acceptEdits", + "plan", + "bypassPermissions" + ] + } + }, + "required": [ + "connectionId", + "deviceId", + "connectionName", + "organizationId", + "podId", + "connectedAt", + "lastHeartbeat" + ] + } + }, + "hasNextPage": { + "type": "boolean" + } + }, + "required": ["connections", "hasNextPage"] + } + } + } + } + } + } + }, + "/v1/listeners/{connectionId}/messages": { + "post": { + 
"description": "Send a message to a specific listener connection", + "summary": "Send Message to Listener", + "tags": ["listeners"], + "parameters": [ + { + "name": "connectionId", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "operationId": "listeners.sendMessage", + "requestBody": { + "description": "Body", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "messages": { + "type": "array", + "items": { + "oneOf": [ + { + "type": "object", + "properties": { + "role": { + "type": "string", + "enum": ["user"] + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["text"] + }, + "text": { + "type": "string" + } + }, + "required": ["type", "text"] + } + } + ] + }, + "otid": { + "type": "string" + } + }, + "required": ["role", "content"] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["approval"] + }, + "approvals": { + "type": "array", + "items": { + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["tool"] + }, + "tool_call_id": { + "type": "string" + }, + "tool_return": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["text"] + }, + "text": { + "type": "string" + } + }, + "required": ["type", "text"] + } + } + ] + }, + "status": { + "type": "string", + "enum": ["success", "error"] + }, + "stdout": { + "type": "array", + "items": { + "type": "string" + }, + "nullable": true + }, + "stderr": { + "type": "array", + "items": { + "type": "string" + }, + "nullable": true + } + }, + "required": [ + "tool_call_id", + "tool_return", + "status" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["approval"] + }, + "approve": { + "type": 
"boolean" + }, + "tool_call_id": { + "type": "string" + }, + "reason": { + "type": "string", + "nullable": true + } + }, + "required": ["approve", "tool_call_id"] + } + ] + } + } + }, + "required": ["type", "approvals"] + } + ] + } + }, + "agentId": { + "type": "string" + }, + "conversationId": { + "type": "string", + "nullable": true + } + }, + "required": ["messages"] + } + } + } + }, + "responses": { + "200": { + "description": "200", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean" + }, + "message": { + "type": "string" + }, + "runId": { + "type": "string" + } + }, + "required": ["success", "message"] + } + } + } + }, + "404": { + "description": "404", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "errorCode": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": ["errorCode", "message"] + } + } + } + }, + "503": { + "description": "503", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "errorCode": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": ["errorCode", "message"] + } + } + } + } + } + } + }, + "/v1/listeners/{connectionId}/mode": { + "post": { + "description": "Change the permission mode of a specific listener connection", + "summary": "Change Listener Mode", + "tags": ["listeners"], + "parameters": [ + { + "name": "connectionId", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "operationId": "listeners.sendModeChange", + "requestBody": { + "description": "Body", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": [ + "default", + "acceptEdits", + "plan", + "bypassPermissions" + ] + } + }, + "required": ["mode"] + } + } + } + }, + "responses": { + "200": { + "description": "200", + "content": { + 
"application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean" + }, + "message": { + "type": "string" + } + }, + "required": ["success", "message"] + } + } + } + }, + "404": { + "description": "404", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "errorCode": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": ["errorCode", "message"] + } + } + } + } + } + } } }, "components": { @@ -25394,6 +26999,14 @@ "title": "Mcp Servers", "description": "List of MCP servers in this agent file" }, + "skills": { + "items": { + "$ref": "#/components/schemas/SkillSchema" + }, + "type": "array", + "title": "Skills", + "description": "List of skills in this agent file" + }, "metadata": { "additionalProperties": { "type": "string" @@ -26107,7 +27720,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -27117,7 +28730,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -27430,7 +29043,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -29187,43 +30800,6 @@ "title": "ChatCompletionSystemMessageParam", "description": "Developer-provided instructions that the model should follow, regardless of\nmessages sent by the user. With o1 models and newer, use `developer` messages\nfor this purpose instead." 
}, - "ChatCompletionTokenLogprob": { - "properties": { - "token": { - "type": "string", - "title": "Token" - }, - "bytes": { - "anyOf": [ - { - "items": { - "type": "integer" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "title": "Bytes" - }, - "logprob": { - "type": "number", - "title": "Logprob" - }, - "top_logprobs": { - "items": { - "$ref": "#/components/schemas/TopLogprob" - }, - "type": "array", - "title": "Top Logprobs" - } - }, - "additionalProperties": true, - "type": "object", - "required": ["token", "logprob", "top_logprobs"], - "title": "ChatCompletionTokenLogprob" - }, "ChatCompletionToolMessageParam": { "properties": { "content": { @@ -29310,7 +30886,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -29448,7 +31024,7 @@ "logprobs": { "anyOf": [ { - "$ref": "#/components/schemas/ChoiceLogprobs" + "$ref": "#/components/schemas/openai__types__chat__chat_completion__ChoiceLogprobs" }, { "type": "null" @@ -29464,42 +31040,6 @@ "required": ["finish_reason", "index", "message"], "title": "Choice" }, - "ChoiceLogprobs": { - "properties": { - "content": { - "anyOf": [ - { - "items": { - "$ref": "#/components/schemas/ChatCompletionTokenLogprob" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "title": "Content" - }, - "refusal": { - "anyOf": [ - { - "items": { - "$ref": "#/components/schemas/ChatCompletionTokenLogprob" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "title": "Refusal" - } - }, - "additionalProperties": true, - "type": "object", - "title": "ChoiceLogprobs", - "description": "Log probability information for the choice." 
- }, "ClientToolSchema": { "properties": { "name": { @@ -29602,9 +31142,16 @@ "CompactionSettings-Input": { "properties": { "model": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Model", - "description": "Model handle to use for summarization (format: provider/model-name)." + "description": "Model handle to use for sliding_window/all summarization (format: provider/model-name). If None, uses lightweight provider-specific defaults." }, "model_settings": { "anyOf": [ @@ -29677,10 +31224,16 @@ "description": "Optional model settings used to override defaults for the summarizer model." }, "prompt": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Prompt", - "description": "The prompt to use for summarization.", - "default": "The following messages are being evicted from your context window. Write a detailed summary that captures what happened in these messages.\n\nThis summary will appear BEFORE the remaining recent messages in context, providing background for what comes after. Include:\n\n1. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress?\n\n2. **High level goals**: If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress. Make sure to not lose track of higher level goals or the ongoing task.\n\n3. **Important details**: Specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later.\n\n4. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later.\n\nWrite in first person as a factual record of what occurred. 
Be thorough and detailed - the goal is to preserve enough context that the recent messages make sense and important information isn't lost.\n\nKeep your summary under 250 words. Only output the summary." + "description": "The prompt to use for summarization. If None, uses mode-specific default." }, "prompt_acknowledgement": { "type": "boolean", @@ -29703,7 +31256,12 @@ }, "mode": { "type": "string", - "enum": ["all", "sliding_window"], + "enum": [ + "all", + "sliding_window", + "self_compact_all", + "self_compact_sliding_window" + ], "title": "Mode", "description": "The type of summarization technique use.", "default": "sliding_window" @@ -29711,20 +31269,26 @@ "sliding_window_percentage": { "type": "number", "title": "Sliding Window Percentage", - "description": "The percentage of the context window to keep post-summarization (only used in sliding window mode)." + "description": "The percentage of the context window to keep post-summarization (only used in sliding window modes)." } }, "type": "object", - "required": ["model"], "title": "CompactionSettings", - "description": "Configuration for conversation compaction / summarization.\n\n``model`` is the only required user-facing field – it specifies the summarizer\nmodel handle (e.g. ``\"openai/gpt-4o-mini\"``). Per-model settings (temperature,\nmax tokens, etc.) are derived from the default configuration for that handle." + "description": "Configuration for conversation compaction / summarization.\n\nPer-model settings (temperature,\nmax tokens, etc.) are derived from the default configuration for that handle." }, "CompactionSettings-Output": { "properties": { "model": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Model", - "description": "Model handle to use for summarization (format: provider/model-name)." + "description": "Model handle to use for sliding_window/all summarization (format: provider/model-name). 
If None, uses lightweight provider-specific defaults." }, "model_settings": { "anyOf": [ @@ -29797,10 +31361,16 @@ "description": "Optional model settings used to override defaults for the summarizer model." }, "prompt": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Prompt", - "description": "The prompt to use for summarization.", - "default": "The following messages are being evicted from your context window. Write a detailed summary that captures what happened in these messages.\n\nThis summary will appear BEFORE the remaining recent messages in context, providing background for what comes after. Include:\n\n1. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress?\n\n2. **High level goals**: If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress. Make sure to not lose track of higher level goals or the ongoing task.\n\n3. **Important details**: Specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later.\n\n4. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later.\n\nWrite in first person as a factual record of what occurred. Be thorough and detailed - the goal is to preserve enough context that the recent messages make sense and important information isn't lost.\n\nKeep your summary under 250 words. Only output the summary." + "description": "The prompt to use for summarization. If None, uses mode-specific default." 
}, "prompt_acknowledgement": { "type": "boolean", @@ -29823,7 +31393,12 @@ }, "mode": { "type": "string", - "enum": ["all", "sliding_window"], + "enum": [ + "all", + "sliding_window", + "self_compact_all", + "self_compact_sliding_window" + ], "title": "Mode", "description": "The type of summarization technique use.", "default": "sliding_window" @@ -29831,13 +31406,69 @@ "sliding_window_percentage": { "type": "number", "title": "Sliding Window Percentage", - "description": "The percentage of the context window to keep post-summarization (only used in sliding window mode)." + "description": "The percentage of the context window to keep post-summarization (only used in sliding window modes)." } }, "type": "object", - "required": ["model"], "title": "CompactionSettings", - "description": "Configuration for conversation compaction / summarization.\n\n``model`` is the only required user-facing field – it specifies the summarizer\nmodel handle (e.g. ``\"openai/gpt-4o-mini\"``). Per-model settings (temperature,\nmax tokens, etc.) are derived from the default configuration for that handle." + "description": "Configuration for conversation compaction / summarization.\n\nPer-model settings (temperature,\nmax tokens, etc.) are derived from the default configuration for that handle." 
+ }, + "CompactionStats": { + "properties": { + "trigger": { + "type": "string", + "title": "Trigger", + "description": "What triggered the compaction (e.g., 'context_window_exceeded', 'post_step_context_check')" + }, + "context_tokens_before": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Context Tokens Before", + "description": "Token count before compaction (from LLM usage stats, includes full context sent to LLM)" + }, + "context_tokens_after": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Context Tokens After", + "description": "Token count after compaction (message tokens only, does not include tool definitions)" + }, + "context_window": { + "type": "integer", + "title": "Context Window", + "description": "The model's context window size" + }, + "messages_count_before": { + "type": "integer", + "title": "Messages Count Before", + "description": "Number of messages before compaction" + }, + "messages_count_after": { + "type": "integer", + "title": "Messages Count After", + "description": "Number of messages after compaction" + } + }, + "type": "object", + "required": [ + "trigger", + "context_window", + "messages_count_before", + "messages_count_after" + ], + "title": "CompactionStats", + "description": "Statistics about a memory compaction operation." }, "ComparisonOperator": { "type": "string", @@ -30096,6 +31727,60 @@ "title": "Core Memory", "description": "The content of the core memory." }, + "num_tokens_memory_filesystem": { + "type": "integer", + "title": "Num Tokens Memory Filesystem", + "description": "The number of tokens in the memory filesystem section (git-enabled agents only).", + "default": 0 + }, + "memory_filesystem": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Memory Filesystem", + "description": "The content of the memory filesystem section." 
+ }, + "num_tokens_tool_usage_rules": { + "type": "integer", + "title": "Num Tokens Tool Usage Rules", + "description": "The number of tokens in the tool usage rules section.", + "default": 0 + }, + "tool_usage_rules": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Tool Usage Rules", + "description": "The content of the tool usage rules section." + }, + "num_tokens_directories": { + "type": "integer", + "title": "Num Tokens Directories", + "description": "The number of tokens in the directories section (attached sources).", + "default": 0 + }, + "directories": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Directories", + "description": "The content of the directories section." + }, "num_tokens_summary_memory": { "type": "integer", "title": "Num Tokens Summary Memory", @@ -30290,6 +31975,88 @@ "type": "array", "title": "Isolated Block Ids", "description": "IDs of blocks that are isolated (specific to this conversation, overriding agent defaults)." + }, + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Model", + "description": "The model handle for this conversation (overrides agent's model). Format: provider/model-name." 
+ }, + "model_settings": { + "anyOf": [ + { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIModelSettings" + }, + { + "$ref": "#/components/schemas/AnthropicModelSettings" + }, + { + "$ref": "#/components/schemas/GoogleAIModelSettings" + }, + { + "$ref": "#/components/schemas/GoogleVertexModelSettings" + }, + { + "$ref": "#/components/schemas/AzureModelSettings" + }, + { + "$ref": "#/components/schemas/XAIModelSettings" + }, + { + "$ref": "#/components/schemas/ZAIModelSettings" + }, + { + "$ref": "#/components/schemas/GroqModelSettings" + }, + { + "$ref": "#/components/schemas/DeepseekModelSettings" + }, + { + "$ref": "#/components/schemas/TogetherModelSettings" + }, + { + "$ref": "#/components/schemas/BedrockModelSettings" + }, + { + "$ref": "#/components/schemas/OpenRouterModelSettings" + }, + { + "$ref": "#/components/schemas/ChatGPTOAuthModelSettings" + } + ], + "discriminator": { + "propertyName": "provider_type", + "mapping": { + "anthropic": "#/components/schemas/AnthropicModelSettings", + "azure": "#/components/schemas/AzureModelSettings", + "bedrock": "#/components/schemas/BedrockModelSettings", + "chatgpt_oauth": "#/components/schemas/ChatGPTOAuthModelSettings", + "deepseek": "#/components/schemas/DeepseekModelSettings", + "google_ai": "#/components/schemas/GoogleAIModelSettings", + "google_vertex": "#/components/schemas/GoogleVertexModelSettings", + "groq": "#/components/schemas/GroqModelSettings", + "openai": "#/components/schemas/OpenAIModelSettings", + "openrouter": "#/components/schemas/OpenRouterModelSettings", + "together": "#/components/schemas/TogetherModelSettings", + "xai": "#/components/schemas/XAIModelSettings", + "zai": "#/components/schemas/ZAIModelSettings" + } + } + }, + { + "type": "null" + } + ], + "title": "Model Settings", + "description": "The model settings for this conversation (overrides agent's model settings)." 
} }, "additionalProperties": false, @@ -30310,6 +32077,9 @@ }, { "$ref": "#/components/schemas/ApprovalCreate" + }, + { + "$ref": "#/components/schemas/ToolReturnCreate" } ] }, @@ -30454,6 +32224,36 @@ "title": "Override Model", "description": "Model handle to use for this request instead of the agent's default model. This allows sending a message to a different model without changing the agent's configuration." }, + "include_compaction_messages": { + "type": "boolean", + "title": "Include Compaction Messages", + "description": "If True, compaction events emit structured `SummaryMessage` and `EventMessage` types. If False (default), compaction messages are not included in the response.", + "default": false + }, + "return_logprobs": { + "type": "boolean", + "title": "Return Logprobs", + "description": "If True, returns log probabilities of the output tokens in the response. Useful for RL training. Only supported for OpenAI-compatible providers (including SGLang).", + "default": false + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Top Logprobs", + "description": "Number of most likely tokens to return at each position (0-20). Requires return_logprobs=True." + }, + "return_token_ids": { + "type": "boolean", + "title": "Return Token Ids", + "description": "If True, returns token IDs and logprobs for ALL LLM generations in the agent step, not just the last one. Uses SGLang native /generate endpoint. Returns 'turns' field with TurnTokenData for each assistant/tool turn. 
Required for proper multi-turn RL training with loss masking.", + "default": false + }, "streaming": { "type": "boolean", "title": "Streaming", @@ -30598,7 +32398,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 41, + "minLength": 41, + "pattern": "^tool-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the tool in the format 'tool-'", + "examples": ["tool-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -30613,7 +32418,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 43, + "minLength": 43, + "pattern": "^source-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the source in the format 'source-'", + "examples": ["source-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -30629,7 +32439,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 43, + "minLength": 43, + "pattern": "^source-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the source in the format 'source-'", + "examples": ["source-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -30644,7 +32459,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 42, + "minLength": 42, + "pattern": "^block-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the block in the format 'block-'", + "examples": ["block-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -31145,7 +32965,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 45, + "minLength": 45, + "pattern": "^identity-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the identity in the format 'identity-'", + "examples": ["identity-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -31543,6 
+33368,88 @@ ], "title": "Isolated Block Labels", "description": "List of block labels that should be isolated (conversation-specific) rather than shared across conversations. New blocks will be created as copies of the agent's blocks with these labels." + }, + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Model", + "description": "The model handle for this conversation (overrides agent's model). Format: provider/model-name." + }, + "model_settings": { + "anyOf": [ + { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIModelSettings" + }, + { + "$ref": "#/components/schemas/AnthropicModelSettings" + }, + { + "$ref": "#/components/schemas/GoogleAIModelSettings" + }, + { + "$ref": "#/components/schemas/GoogleVertexModelSettings" + }, + { + "$ref": "#/components/schemas/AzureModelSettings" + }, + { + "$ref": "#/components/schemas/XAIModelSettings" + }, + { + "$ref": "#/components/schemas/ZAIModelSettings" + }, + { + "$ref": "#/components/schemas/GroqModelSettings" + }, + { + "$ref": "#/components/schemas/DeepseekModelSettings" + }, + { + "$ref": "#/components/schemas/TogetherModelSettings" + }, + { + "$ref": "#/components/schemas/BedrockModelSettings" + }, + { + "$ref": "#/components/schemas/OpenRouterModelSettings" + }, + { + "$ref": "#/components/schemas/ChatGPTOAuthModelSettings" + } + ], + "discriminator": { + "propertyName": "provider_type", + "mapping": { + "anthropic": "#/components/schemas/AnthropicModelSettings", + "azure": "#/components/schemas/AzureModelSettings", + "bedrock": "#/components/schemas/BedrockModelSettings", + "chatgpt_oauth": "#/components/schemas/ChatGPTOAuthModelSettings", + "deepseek": "#/components/schemas/DeepseekModelSettings", + "google_ai": "#/components/schemas/GoogleAIModelSettings", + "google_vertex": "#/components/schemas/GoogleVertexModelSettings", + "groq": "#/components/schemas/GroqModelSettings", + "openai": "#/components/schemas/OpenAIModelSettings", + "openrouter": 
"#/components/schemas/OpenRouterModelSettings", + "together": "#/components/schemas/TogetherModelSettings", + "xai": "#/components/schemas/XAIModelSettings", + "zai": "#/components/schemas/ZAIModelSettings" + } + } + }, + { + "type": "null" + } + ], + "title": "Model Settings", + "description": "The model settings for this conversation (overrides agent's model settings)." } }, "type": "object", @@ -31792,7 +33699,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -31992,6 +33899,50 @@ "required": ["manager_agent_id"], "title": "DynamicManager" }, + "DynamicManagerSchema": { + "properties": { + "manager_type": { + "type": "string", + "const": "dynamic", + "title": "Manager Type", + "description": "", + "default": "dynamic" + }, + "manager_agent_id": { + "type": "string", + "title": "Manager Agent Id", + "description": "" + }, + "termination_token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Termination Token", + "description": "", + "default": "DONE!" + }, + "max_turns": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Max Turns", + "description": "" + } + }, + "type": "object", + "required": ["manager_agent_id"], + "title": "DynamicManagerSchema" + }, "DynamicManagerUpdate": { "properties": { "manager_type": { @@ -32400,7 +34351,7 @@ }, "message_type": { "type": "string", - "const": "event", + "const": "event_message", "title": "Message Type", "default": "event_message" }, @@ -32486,6 +34437,39 @@ "title": "EventMessage", "description": "A message for notifying the developer that an event that has occured (e.g. a compaction). Events are NOT part of the context window." 
}, + "ExportAgentRequest": { + "properties": { + "skills": { + "items": { + "$ref": "#/components/schemas/SkillSchema" + }, + "type": "array", + "title": "Skills", + "description": "Skills to include in the export. Each skill must have a name and files (including SKILL.md)." + }, + "conversation_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Conversation Id", + "description": "Conversation ID to export. If provided, uses messages from this conversation instead of the agent's global message history." + }, + "scrub_messages": { + "type": "boolean", + "title": "Scrub Messages", + "description": "If True, excludes all messages from the export. Useful for sharing agent configs without conversation history.", + "default": false + } + }, + "type": "object", + "title": "ExportAgentRequest", + "description": "Request body for POST /export endpoint." + }, "FeedbackType": { "type": "string", "enum": ["positive", "negative"], @@ -33476,6 +35460,79 @@ "type": "object", "title": "GeminiThinkingConfig" }, + "GenerateRequest": { + "properties": { + "prompt": { + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The prompt/message to send to the LLM" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "System Prompt", + "description": "Optional system prompt to prepend to the conversation" + }, + "override_model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Override Model", + "description": "Model handle to use instead of agent's default (e.g., 'openai/gpt-4', 'anthropic/claude-3-5-sonnet')" + }, + "response_schema": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Response Schema", + "description": "JSON schema for structured output. When provided, the LLM will be forced to return a response matching this schema via tool calling. 
The schema should follow JSON Schema format with 'properties' and optionally 'required' fields." + } + }, + "type": "object", + "required": ["prompt"], + "title": "GenerateRequest", + "description": "Request for direct LLM generation without agent processing." + }, + "GenerateResponse": { + "properties": { + "content": { + "type": "string", + "title": "Content", + "description": "The LLM's response text" + }, + "model": { + "type": "string", + "title": "Model", + "description": "The model that generated this response" + }, + "usage": { + "$ref": "#/components/schemas/LettaUsageStatistics", + "description": "Token usage statistics" + } + }, + "type": "object", + "required": ["content", "model", "usage"], + "title": "GenerateResponse", + "description": "Response from direct LLM generation." + }, "GenerateToolInput": { "properties": { "tool_name": { @@ -33559,7 +35616,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -33628,7 +35685,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -33697,7 +35754,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -34045,16 +36102,11 @@ "properties": { "agent_ids": { "items": { - "type": "string", - "maxLength": 42, - "minLength": 42, - "pattern": "^agent-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", - "description": "The ID of the agent in the format 'agent-'", - "examples": ["agent-123e4567-e89b-42d3-8456-426614174000"] + "type": "string" }, "type": "array", "title": "Agent Ids", - "description": "" + "description": "List of agent IDs in this group" }, "description": { "type": 
"string", @@ -34067,16 +36119,16 @@ "$ref": "#/components/schemas/RoundRobinManager" }, { - "$ref": "#/components/schemas/SupervisorManager" + "$ref": "#/components/schemas/SupervisorManagerSchema" }, { - "$ref": "#/components/schemas/DynamicManager" + "$ref": "#/components/schemas/DynamicManagerSchema" }, { - "$ref": "#/components/schemas/SleeptimeManager" + "$ref": "#/components/schemas/SleeptimeManagerSchema" }, { - "$ref": "#/components/schemas/VoiceSleeptimeManager" + "$ref": "#/components/schemas/VoiceSleeptimeManagerSchema" } ], "title": "Manager Config", @@ -34087,11 +36139,11 @@ "discriminator": { "propertyName": "manager_type", "mapping": { - "dynamic": "#/components/schemas/DynamicManager", + "dynamic": "#/components/schemas/DynamicManagerSchema", "round_robin": "#/components/schemas/RoundRobinManager", - "sleeptime": "#/components/schemas/SleeptimeManager", - "supervisor": "#/components/schemas/SupervisorManager", - "voice_sleeptime": "#/components/schemas/VoiceSleeptimeManager" + "sleeptime": "#/components/schemas/SleeptimeManagerSchema", + "supervisor": "#/components/schemas/SupervisorManagerSchema", + "voice_sleeptime": "#/components/schemas/VoiceSleeptimeManagerSchema" } } }, @@ -34109,18 +36161,12 @@ }, "shared_block_ids": { "items": { - "type": "string", - "maxLength": 42, - "minLength": 42, - "pattern": "^block-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", - "description": "The ID of the block in the format 'block-'", - "examples": ["block-123e4567-e89b-42d3-8456-426614174000"] + "type": "string" }, "type": "array", "title": "Shared Block Ids", - "description": "", - "default": [], - "deprecated": true + "description": "List of shared block IDs", + "default": [] }, "hidden": { "anyOf": [ @@ -34977,7 +37023,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 41, + "minLength": 41, + "pattern": "^tool-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The 
ID of the tool in the format 'tool-'", + "examples": ["tool-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -34992,7 +37043,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 43, + "minLength": 43, + "pattern": "^source-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the source in the format 'source-'", + "examples": ["source-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -35008,7 +37064,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 43, + "minLength": 43, + "pattern": "^source-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the source in the format 'source-'", + "examples": ["source-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -35023,7 +37084,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 42, + "minLength": 42, + "pattern": "^block-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the block in the format 'block-'", + "examples": ["block-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -35508,7 +37574,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 45, + "minLength": 45, + "pattern": "^identity-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the identity in the format 'identity-'", + "examples": ["identity-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -36354,14 +38425,14 @@ "anyOf": [ { "type": "string", - "enum": ["low", "medium", "high"] + "enum": ["low", "medium", "high", "max"] }, { "type": "null" } ], "title": "Effort", - "description": "The effort level for Anthropic Opus 4.5 model (controls token spending). Not setting this gives similar performance to 'high'." 
+ "description": "The effort level for Anthropic models that support it (Opus 4.5, Opus 4.6). Controls token spending and thinking behavior. Not setting this gives similar performance to 'high'." }, "frequency_penalty": { "anyOf": [ @@ -36462,6 +38533,30 @@ "title": "Strict", "description": "Enable strict mode for tool calling. When true, tool schemas include strict: true and additionalProperties: false, guaranteeing tool outputs match JSON schemas.", "default": false + }, + "return_logprobs": { + "type": "boolean", + "title": "Return Logprobs", + "description": "Whether to return log probabilities of the output tokens. Useful for RL training.", + "default": false + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Top Logprobs", + "description": "Number of most likely tokens to return at each position (0-20). Requires return_logprobs=True." + }, + "return_token_ids": { + "type": "boolean", + "title": "Return Token Ids", + "description": "Whether to return token IDs for all LLM generations via SGLang native endpoint. Required for multi-turn RL training with loss masking. Only works with SGLang provider.", + "default": false } }, "type": "object", @@ -36481,6 +38576,9 @@ }, { "$ref": "#/components/schemas/ApprovalCreate" + }, + { + "$ref": "#/components/schemas/ToolReturnCreate" } ] }, @@ -36625,6 +38723,36 @@ "title": "Override Model", "description": "Model handle to use for this request instead of the agent's default model. This allows sending a message to a different model without changing the agent's configuration." }, + "include_compaction_messages": { + "type": "boolean", + "title": "Include Compaction Messages", + "description": "If True, compaction events emit structured `SummaryMessage` and `EventMessage` types. 
If False (default), compaction messages are not included in the response.", + "default": false + }, + "return_logprobs": { + "type": "boolean", + "title": "Return Logprobs", + "description": "If True, returns log probabilities of the output tokens in the response. Useful for RL training. Only supported for OpenAI-compatible providers (including SGLang).", + "default": false + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Top Logprobs", + "description": "Number of most likely tokens to return at each position (0-20). Requires return_logprobs=True." + }, + "return_token_ids": { + "type": "boolean", + "title": "Return Token Ids", + "description": "If True, returns token IDs and logprobs for ALL LLM generations in the agent step, not just the last one. Uses SGLang native /generate endpoint. Returns 'turns' field with TurnTokenData for each assistant/tool turn. Required for proper multi-turn RL training with loss masking.", + "default": false + }, "callback_url": { "anyOf": [ { @@ -36667,6 +38795,9 @@ }, { "$ref": "#/components/schemas/ApprovalCreate" + }, + { + "$ref": "#/components/schemas/ToolReturnCreate" } ] }, @@ -36811,6 +38942,36 @@ "title": "Override Model", "description": "Model handle to use for this request instead of the agent's default model. This allows sending a message to a different model without changing the agent's configuration." }, + "include_compaction_messages": { + "type": "boolean", + "title": "Include Compaction Messages", + "description": "If True, compaction events emit structured `SummaryMessage` and `EventMessage` types. If False (default), compaction messages are not included in the response.", + "default": false + }, + "return_logprobs": { + "type": "boolean", + "title": "Return Logprobs", + "description": "If True, returns log probabilities of the output tokens in the response. Useful for RL training. 
Only supported for OpenAI-compatible providers (including SGLang).", + "default": false + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Top Logprobs", + "description": "Number of most likely tokens to return at each position (0-20). Requires return_logprobs=True." + }, + "return_token_ids": { + "type": "boolean", + "title": "Return Token Ids", + "description": "If True, returns token IDs and logprobs for ALL LLM generations in the agent step, not just the last one. Uses SGLang native /generate endpoint. Returns 'turns' field with TurnTokenData for each assistant/tool turn. Required for proper multi-turn RL training with loss masking.", + "default": false + }, "agent_id": { "type": "string", "maxLength": 42, @@ -37033,6 +39194,9 @@ }, { "$ref": "#/components/schemas/ApprovalCreate" + }, + { + "$ref": "#/components/schemas/ToolReturnCreate" } ] }, @@ -37176,6 +39340,36 @@ ], "title": "Override Model", "description": "Model handle to use for this request instead of the agent's default model. This allows sending a message to a different model without changing the agent's configuration." + }, + "include_compaction_messages": { + "type": "boolean", + "title": "Include Compaction Messages", + "description": "If True, compaction events emit structured `SummaryMessage` and `EventMessage` types. If False (default), compaction messages are not included in the response.", + "default": false + }, + "return_logprobs": { + "type": "boolean", + "title": "Return Logprobs", + "description": "If True, returns log probabilities of the output tokens in the response. Useful for RL training. Only supported for OpenAI-compatible providers (including SGLang).", + "default": false + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Top Logprobs", + "description": "Number of most likely tokens to return at each position (0-20). Requires return_logprobs=True." 
+ }, + "return_token_ids": { + "type": "boolean", + "title": "Return Token Ids", + "description": "If True, returns token IDs and logprobs for ALL LLM generations in the agent step, not just the last one. Uses SGLang native /generate endpoint. Returns 'turns' field with TurnTokenData for each assistant/tool turn. Required for proper multi-turn RL training with loss masking.", + "default": false } }, "type": "object", @@ -37236,6 +39430,32 @@ "usage": { "$ref": "#/components/schemas/LettaUsageStatistics", "description": "The usage statistics of the agent." + }, + "logprobs": { + "anyOf": [ + { + "$ref": "#/components/schemas/letta__schemas__openai__chat_completion_response__ChoiceLogprobs" + }, + { + "type": "null" + } + ], + "description": "Log probabilities of the output tokens from the last LLM call. Only present if return_logprobs was enabled." + }, + "turns": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/TurnTokenData" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Turns", + "description": "Token data for all LLM generations in multi-turn agent interaction. Includes token IDs and logprobs for each assistant turn, plus tool result content. Only present if return_token_ids was enabled. Used for RL training with loss masking." } }, "type": "object", @@ -37274,6 +39494,9 @@ }, { "$ref": "#/components/schemas/ApprovalCreate" + }, + { + "$ref": "#/components/schemas/ToolReturnCreate" } ] }, @@ -37418,6 +39641,36 @@ "title": "Override Model", "description": "Model handle to use for this request instead of the agent's default model. This allows sending a message to a different model without changing the agent's configuration." }, + "include_compaction_messages": { + "type": "boolean", + "title": "Include Compaction Messages", + "description": "If True, compaction events emit structured `SummaryMessage` and `EventMessage` types. 
If False (default), compaction messages are not included in the response.", + "default": false + }, + "return_logprobs": { + "type": "boolean", + "title": "Return Logprobs", + "description": "If True, returns log probabilities of the output tokens in the response. Useful for RL training. Only supported for OpenAI-compatible providers (including SGLang).", + "default": false + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Top Logprobs", + "description": "Number of most likely tokens to return at each position (0-20). Requires return_logprobs=True." + }, + "return_token_ids": { + "type": "boolean", + "title": "Return Token Ids", + "description": "If True, returns token IDs and logprobs for ALL LLM generations in the agent step, not just the last one. Uses SGLang native /generate endpoint. Returns 'turns' field with TurnTokenData for each assistant/tool turn. Required for proper multi-turn RL training with loss masking.", + "default": false + }, "streaming": { "type": "boolean", "title": "Streaming", @@ -37591,6 +39844,18 @@ ], "title": "Reasoning Tokens", "description": "The number of reasoning/thinking tokens generated. None if not reported by provider." + }, + "context_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Context Tokens", + "description": "Estimate of tokens currently in the context window." } }, "type": "object", @@ -37915,6 +40180,12 @@ "title": "Agent Type", "description": "Agent type controlling prompt rendering." 
}, + "git_enabled": { + "type": "boolean", + "title": "Git Enabled", + "description": "Whether this agent uses git-backed memory with structured labels.", + "default": false + }, "blocks": { "items": { "$ref": "#/components/schemas/Block" @@ -38396,7 +40667,15 @@ }, "MessageRole": { "type": "string", - "enum": ["assistant", "user", "tool", "function", "system", "approval"], + "enum": [ + "assistant", + "user", + "tool", + "function", + "system", + "approval", + "summary" + ], "title": "MessageRole" }, "MessageSearchRequest": { @@ -38579,7 +40858,9 @@ "tool_call_message", "tool_return_message", "approval_request_message", - "approval_response_message" + "approval_response_message", + "summary_message", + "event_message" ], "title": "MessageType" }, @@ -38834,14 +41115,14 @@ "anyOf": [ { "type": "string", - "enum": ["low", "medium", "high"] + "enum": ["low", "medium", "high", "max"] }, { "type": "null" } ], "title": "Effort", - "description": "The effort level for Anthropic Opus 4.5 model (controls token spending). Not setting this gives similar performance to 'high'." + "description": "The effort level for Anthropic models that support it (Opus 4.5, Opus 4.6). Controls token spending and thinking behavior. Not setting this gives similar performance to 'high'." }, "frequency_penalty": { "anyOf": [ @@ -38947,6 +41228,30 @@ "description": "Enable strict mode for tool calling. When true, tool schemas include strict: true and additionalProperties: false, guaranteeing tool outputs match JSON schemas.", "default": false }, + "return_logprobs": { + "type": "boolean", + "title": "Return Logprobs", + "description": "Whether to return log probabilities of the output tokens. Useful for RL training.", + "default": false + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Top Logprobs", + "description": "Number of most likely tokens to return at each position (0-20). Requires return_logprobs=True." 
+ }, + "return_token_ids": { + "type": "boolean", + "title": "Return Token Ids", + "description": "Whether to return token IDs for all LLM generations via SGLang native endpoint. Required for multi-turn RL training with loss masking. Only works with SGLang provider.", + "default": false + }, "max_context_window": { "type": "integer", "title": "Max Context Window", @@ -39073,7 +41378,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -39160,7 +41465,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -41694,6 +43999,46 @@ "required": ["query"], "title": "SearchAllMessagesRequest" }, + "SkillSchema": { + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "Skill name, also serves as unique identifier (e.g., 'slack', 'pdf')" + }, + "files": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Files", + "description": "Skill files as path -> content mapping. Must include 'SKILL.md' key if provided." 
+ }, + "source_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source Url", + "description": "Source URL for skill resolution (e.g., 'letta:slack', 'anthropic:pdf', 'owner/repo/path')" + } + }, + "type": "object", + "required": ["name"], + "title": "SkillSchema", + "description": "Skill schema for agent files.\n\nSkills are folders of instructions, scripts, and resources that agents can load.\nEither files (with SKILL.md) or source_url must be provided:\n- files with SKILL.md: inline skill content\n- source_url: reference to resolve later (e.g., 'letta:slack')\n- both: inline content with provenance tracking" + }, "SleeptimeManager": { "properties": { "manager_type": { @@ -41729,6 +44074,37 @@ "required": ["manager_agent_id"], "title": "SleeptimeManager" }, + "SleeptimeManagerSchema": { + "properties": { + "manager_type": { + "type": "string", + "const": "sleeptime", + "title": "Manager Type", + "description": "", + "default": "sleeptime" + }, + "manager_agent_id": { + "type": "string", + "title": "Manager Agent Id", + "description": "" + }, + "sleeptime_agent_frequency": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Sleeptime Agent Frequency", + "description": "" + } + }, + "type": "object", + "required": ["manager_agent_id"], + "title": "SleeptimeManagerSchema" + }, "SleeptimeManagerUpdate": { "properties": { "manager_type": { @@ -42364,6 +44740,18 @@ "title": "Model", "description": "The name of the model used for this step." }, + "model_handle": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Model Handle", + "description": "The model handle (e.g., 'openai/gpt-4o-mini') used for this step." + }, "model_endpoint": { "anyOf": [ { @@ -42424,6 +44812,42 @@ "title": "Total Tokens", "description": "The total number of tokens processed by the agent during this step." 
}, + "cached_input_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Cached Input Tokens", + "description": "The number of input tokens served from cache. None if not reported by provider." + }, + "cache_write_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Cache Write Tokens", + "description": "The number of input tokens written to cache (Anthropic only). None if not reported by provider." + }, + "reasoning_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Reasoning Tokens", + "description": "The number of reasoning/thinking tokens generated. None if not reported by provider." + }, "completion_tokens_details": { "anyOf": [ { @@ -42748,6 +45172,7 @@ "no_tool_call", "tool_rule", "cancelled", + "insufficient_credits", "requires_approval", "context_window_overflow_in_system_prompt" ], @@ -42959,7 +45384,7 @@ }, "message_type": { "type": "string", - "const": "summary", + "const": "summary_message", "title": "Message Type", "default": "summary_message" }, @@ -43032,6 +45457,16 @@ "summary": { "type": "string", "title": "Summary" + }, + "compaction_stats": { + "anyOf": [ + { + "$ref": "#/components/schemas/CompactionStats" + }, + { + "type": "null" + } + ] } }, "type": "object", @@ -43062,6 +45497,25 @@ "required": ["manager_agent_id"], "title": "SupervisorManager" }, + "SupervisorManagerSchema": { + "properties": { + "manager_type": { + "type": "string", + "const": "supervisor", + "title": "Manager Type", + "description": "", + "default": "supervisor" + }, + "manager_agent_id": { + "type": "string", + "title": "Manager Agent Id", + "description": "" + } + }, + "type": "object", + "required": ["manager_agent_id"], + "title": "SupervisorManagerSchema" + }, "SupervisorManagerUpdate": { "properties": { "manager_type": { @@ -43353,7 +45807,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel 
tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -44255,6 +46709,29 @@ "required": ["tool_call_id", "content", "is_error"], "title": "ToolReturnContent" }, + "ToolReturnCreate": { + "properties": { + "type": { + "type": "string", + "const": "tool_return", + "title": "Type", + "description": "The message type to be created.", + "default": "tool_return" + }, + "tool_returns": { + "items": { + "$ref": "#/components/schemas/letta__schemas__letta_message__ToolReturn" + }, + "type": "array", + "title": "Tool Returns", + "description": "List of tool returns from client-side execution" + } + }, + "type": "object", + "required": ["tool_returns"], + "title": "ToolReturnCreate", + "description": "Submit tool return(s) from client-side tool execution.\n\nThis is the preferred way to send tool results back to the agent after\nclient-side tool execution. It is equivalent to sending an ApprovalCreate\nwith tool return approvals, but provides a cleaner API for the common case." + }, "ToolReturnMessage": { "properties": { "id": { @@ -44824,13 +47301,15 @@ "type": "object", "title": "ToolUpdate" }, - "TopLogprob": { + "TurnTokenData": { "properties": { - "token": { + "role": { "type": "string", - "title": "Token" + "enum": ["assistant", "tool"], + "title": "Role", + "description": "Role of this turn: 'assistant' for LLM generations (trainable), 'tool' for tool results (non-trainable)." }, - "bytes": { + "output_ids": { "anyOf": [ { "items": { @@ -44842,17 +47321,54 @@ "type": "null" } ], - "title": "Bytes" + "title": "Output Ids", + "description": "Token IDs from SGLang native endpoint. Only present for assistant turns." 
}, - "logprob": { - "type": "number", - "title": "Logprob" + "output_token_logprobs": { + "anyOf": [ + { + "items": { + "items": {}, + "type": "array" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Output Token Logprobs", + "description": "Logprobs from SGLang: [[logprob, token_id, top_logprob_or_null], ...]. Only present for assistant turns." + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content", + "description": "Text content. For tool turns, client tokenizes this with loss_mask=0." + }, + "tool_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Tool Name", + "description": "Name of the tool called. Only present for tool turns." } }, - "additionalProperties": true, "type": "object", - "required": ["token", "logprob"], - "title": "TopLogprob" + "required": ["role"], + "title": "TurnTokenData", + "description": "Token data for a single LLM generation turn in a multi-turn agent interaction.\n\nUsed for RL training to track token IDs and logprobs across all LLM calls,\nnot just the final one. Tool results are included so the client can tokenize\nthem with loss_mask=0 (non-trainable)." 
}, "UpdateAgent": { "properties": { @@ -44872,7 +47388,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 41, + "minLength": 41, + "pattern": "^tool-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the tool in the format 'tool-'", + "examples": ["tool-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -44887,7 +47408,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 43, + "minLength": 43, + "pattern": "^source-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the source in the format 'source-'", + "examples": ["source-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -44903,7 +47429,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 43, + "minLength": 43, + "pattern": "^source-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the source in the format 'source-'", + "examples": ["source-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -44918,7 +47449,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 42, + "minLength": 42, + "pattern": "^block-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the block in the format 'block-'", + "examples": ["block-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -45017,7 +47553,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 44, + "minLength": 44, + "pattern": "^message-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the message in the format 'message-'", + "examples": ["message-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -45123,7 +47664,12 @@ "anyOf": [ { "items": { - "type": "string" + "type": "string", + "maxLength": 45, + "minLength": 45, + 
"pattern": "^identity-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", + "description": "The ID of the identity in the format 'identity-'", + "examples": ["identity-123e4567-e89b-42d3-8456-426614174000"] }, "type": "array" }, @@ -45497,6 +48043,88 @@ ], "title": "Summary", "description": "A summary of the conversation." + }, + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Model", + "description": "The model handle for this conversation (overrides agent's model). Format: provider/model-name." + }, + "model_settings": { + "anyOf": [ + { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIModelSettings" + }, + { + "$ref": "#/components/schemas/AnthropicModelSettings" + }, + { + "$ref": "#/components/schemas/GoogleAIModelSettings" + }, + { + "$ref": "#/components/schemas/GoogleVertexModelSettings" + }, + { + "$ref": "#/components/schemas/AzureModelSettings" + }, + { + "$ref": "#/components/schemas/XAIModelSettings" + }, + { + "$ref": "#/components/schemas/ZAIModelSettings" + }, + { + "$ref": "#/components/schemas/GroqModelSettings" + }, + { + "$ref": "#/components/schemas/DeepseekModelSettings" + }, + { + "$ref": "#/components/schemas/TogetherModelSettings" + }, + { + "$ref": "#/components/schemas/BedrockModelSettings" + }, + { + "$ref": "#/components/schemas/OpenRouterModelSettings" + }, + { + "$ref": "#/components/schemas/ChatGPTOAuthModelSettings" + } + ], + "discriminator": { + "propertyName": "provider_type", + "mapping": { + "anthropic": "#/components/schemas/AnthropicModelSettings", + "azure": "#/components/schemas/AzureModelSettings", + "bedrock": "#/components/schemas/BedrockModelSettings", + "chatgpt_oauth": "#/components/schemas/ChatGPTOAuthModelSettings", + "deepseek": "#/components/schemas/DeepseekModelSettings", + "google_ai": "#/components/schemas/GoogleAIModelSettings", + "google_vertex": "#/components/schemas/GoogleVertexModelSettings", + "groq": 
"#/components/schemas/GroqModelSettings", + "openai": "#/components/schemas/OpenAIModelSettings", + "openrouter": "#/components/schemas/OpenRouterModelSettings", + "together": "#/components/schemas/TogetherModelSettings", + "xai": "#/components/schemas/XAIModelSettings", + "zai": "#/components/schemas/ZAIModelSettings" + } + } + }, + { + "type": "null" + } + ], + "title": "Model Settings", + "description": "The model settings for this conversation (overrides agent's model settings)." } }, "type": "object", @@ -45976,8 +48604,12 @@ "properties": { "id": { "type": "string", + "maxLength": 41, + "minLength": 41, + "pattern": "^user-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$", "title": "Id", - "description": "The id of the user to update." + "description": "The id of the user to update.", + "examples": ["user-123e4567-e89b-42d3-8456-426614174000"] }, "name": { "anyOf": [ @@ -46079,6 +48711,49 @@ "required": ["manager_agent_id"], "title": "VoiceSleeptimeManager" }, + "VoiceSleeptimeManagerSchema": { + "properties": { + "manager_type": { + "type": "string", + "const": "voice_sleeptime", + "title": "Manager Type", + "description": "", + "default": "voice_sleeptime" + }, + "manager_agent_id": { + "type": "string", + "title": "Manager Agent Id", + "description": "" + }, + "max_message_buffer_length": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Max Message Buffer Length", + "description": "" + }, + "min_message_buffer_length": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Min Message Buffer Length", + "description": "" + } + }, + "type": "object", + "required": ["manager_agent_id"], + "title": "VoiceSleeptimeManagerSchema" + }, "VoiceSleeptimeManagerUpdate": { "properties": { "manager_type": { @@ -46145,7 +48820,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, 
"provider_type": { "type": "string", @@ -46207,7 +48882,7 @@ "type": "boolean", "title": "Parallel Tool Calls", "description": "Whether to enable parallel tool calling.", - "default": false + "default": true }, "provider_type": { "type": "string", @@ -46251,12 +48926,40 @@ ], "title": "Response Format", "description": "The response format for the model." + }, + "thinking": { + "$ref": "#/components/schemas/ZAIThinking", + "description": "The thinking configuration for GLM-4.5+ models.", + "default": { + "type": "enabled", + "clear_thinking": false + } } }, "type": "object", "title": "ZAIModelSettings", "description": "Z.ai (ZhipuAI) model configuration (OpenAI-compatible)." }, + "ZAIThinking": { + "properties": { + "type": { + "type": "string", + "enum": ["enabled", "disabled"], + "title": "Type", + "description": "Whether thinking is enabled or disabled.", + "default": "enabled" + }, + "clear_thinking": { + "type": "boolean", + "title": "Clear Thinking", + "description": "If False, preserved thinking is used (recommended for agents).", + "default": false + } + }, + "type": "object", + "title": "ZAIThinking", + "description": "Thinking configuration for ZAI GLM-4.5+ models." + }, "letta__schemas__agent_file__AgentSchema": { "properties": { "name": { @@ -46322,8 +49025,7 @@ } ], "title": "Source Ids", - "description": "Deprecated: Use `folder_ids` field instead. The ids of the sources used by the agent.", - "deprecated": true + "description": "The ids of the sources used by the agent." 
}, "folder_ids": { "anyOf": [ @@ -48060,6 +50762,105 @@ "required": ["status"], "title": "ToolReturn" }, + "letta__schemas__openai__chat_completion_response__ChatCompletionTokenLogprob": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "anyOf": [ + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Bytes" + }, + "logprob": { + "type": "number", + "title": "Logprob" + }, + "top_logprobs": { + "items": { + "$ref": "#/components/schemas/letta__schemas__openai__chat_completion_response__TopLogprob" + }, + "type": "array", + "title": "Top Logprobs" + } + }, + "type": "object", + "required": ["token", "logprob", "top_logprobs"], + "title": "ChatCompletionTokenLogprob" + }, + "letta__schemas__openai__chat_completion_response__ChoiceLogprobs": { + "properties": { + "content": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/letta__schemas__openai__chat_completion_response__ChatCompletionTokenLogprob" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Content" + }, + "refusal": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/letta__schemas__openai__chat_completion_response__ChatCompletionTokenLogprob" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Refusal" + } + }, + "type": "object", + "title": "ChoiceLogprobs" + }, + "letta__schemas__openai__chat_completion_response__TopLogprob": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "anyOf": [ + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Bytes" + }, + "logprob": { + "type": "number", + "title": "Logprob" + } + }, + "type": "object", + "required": ["token", "logprob"], + "title": "TopLogprob" + }, "letta__serialize_schemas__pydantic_agent_schema__AgentSchema": { "properties": { "agent_type": { @@ -48406,6 +51207,42 @@ "type": "object", "title": 
"ToolExecuteRequest" }, + "openai__types__chat__chat_completion__ChoiceLogprobs": { + "properties": { + "content": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/openai__types__chat__chat_completion_token_logprob__ChatCompletionTokenLogprob" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Content" + }, + "refusal": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/openai__types__chat__chat_completion_token_logprob__ChatCompletionTokenLogprob" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Refusal" + } + }, + "additionalProperties": true, + "type": "object", + "title": "ChoiceLogprobs", + "description": "Log probability information for the choice." + }, "openai__types__chat__chat_completion_message_function_tool_call__Function": { "properties": { "arguments": { @@ -48439,6 +51276,73 @@ "title": "Function", "description": "The function that the model called." }, + "openai__types__chat__chat_completion_token_logprob__ChatCompletionTokenLogprob": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "anyOf": [ + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Bytes" + }, + "logprob": { + "type": "number", + "title": "Logprob" + }, + "top_logprobs": { + "items": { + "$ref": "#/components/schemas/openai__types__chat__chat_completion_token_logprob__TopLogprob" + }, + "type": "array", + "title": "Top Logprobs" + } + }, + "additionalProperties": true, + "type": "object", + "required": ["token", "logprob", "top_logprobs"], + "title": "ChatCompletionTokenLogprob" + }, + "openai__types__chat__chat_completion_token_logprob__TopLogprob": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "anyOf": [ + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Bytes" + }, + "logprob": { + "type": "number", + "title": "Logprob" 
+ } + }, + "additionalProperties": true, + "type": "object", + "required": ["token", "logprob"], + "title": "TopLogprob" + }, "LettaMessageUnion": { "oneOf": [ { @@ -48487,8 +51391,8 @@ "assistant_message": "#/components/schemas/AssistantMessage", "approval_request_message": "#/components/schemas/ApprovalRequestMessage", "approval_response_message": "#/components/schemas/ApprovalResponseMessage", - "summary": "#/components/schemas/SummaryMessage", - "event": "#/components/schemas/EventMessage" + "summary_message": "#/components/schemas/SummaryMessage", + "event_message": "#/components/schemas/EventMessage" } } }, diff --git a/letta/__init__.py b/letta/__init__.py index 5019bbad..9de25d7b 100644 --- a/letta/__init__.py +++ b/letta/__init__.py @@ -5,7 +5,7 @@ try: __version__ = version("letta") except PackageNotFoundError: # Fallback for development installations - __version__ = "0.16.4" + __version__ = "0.16.5" if os.environ.get("LETTA_VERSION"): __version__ = os.environ["LETTA_VERSION"] @@ -16,26 +16,32 @@ try: from letta.settings import DatabaseChoice, settings if settings.database_engine == DatabaseChoice.SQLITE: - from letta.orm import sqlite_functions + from letta.orm import sqlite_functions # noqa: F401 except ImportError: # If sqlite_vec is not installed, it's fine for client usage pass # # imports for easier access -from letta.schemas.agent import AgentState -from letta.schemas.block import Block -from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import JobStatus -from letta.schemas.file import FileMetadata -from letta.schemas.job import Job -from letta.schemas.letta_message import LettaMessage, LettaPing -from letta.schemas.letta_stop_reason import LettaStopReason -from letta.schemas.llm_config import LLMConfig -from letta.schemas.memory import ArchivalMemorySummary, BasicBlockMemory, ChatMemory, Memory, RecallMemorySummary -from letta.schemas.message import Message -from letta.schemas.organization import Organization 
-from letta.schemas.passage import Passage -from letta.schemas.source import Source -from letta.schemas.tool import Tool -from letta.schemas.usage import LettaUsageStatistics -from letta.schemas.user import User +from letta.schemas.agent import AgentState as AgentState +from letta.schemas.block import Block as Block +from letta.schemas.embedding_config import EmbeddingConfig as EmbeddingConfig +from letta.schemas.enums import JobStatus as JobStatus +from letta.schemas.file import FileMetadata as FileMetadata +from letta.schemas.job import Job as Job +from letta.schemas.letta_message import LettaErrorMessage as LettaErrorMessage, LettaMessage as LettaMessage, LettaPing as LettaPing +from letta.schemas.letta_stop_reason import LettaStopReason as LettaStopReason +from letta.schemas.llm_config import LLMConfig as LLMConfig +from letta.schemas.memory import ( + ArchivalMemorySummary as ArchivalMemorySummary, + BasicBlockMemory as BasicBlockMemory, + ChatMemory as ChatMemory, + Memory as Memory, + RecallMemorySummary as RecallMemorySummary, +) +from letta.schemas.message import Message as Message +from letta.schemas.organization import Organization as Organization +from letta.schemas.passage import Passage as Passage +from letta.schemas.source import Source as Source +from letta.schemas.tool import Tool as Tool +from letta.schemas.usage import LettaUsageStatistics as LettaUsageStatistics +from letta.schemas.user import User as User diff --git a/letta/adapters/letta_llm_adapter.py b/letta/adapters/letta_llm_adapter.py index b00a8edb..49e99c49 100644 --- a/letta/adapters/letta_llm_adapter.py +++ b/letta/adapters/letta_llm_adapter.py @@ -2,10 +2,11 @@ from abc import ABC, abstractmethod from typing import AsyncGenerator from letta.llm_api.llm_client_base import LLMClientBase +from letta.schemas.enums import LLMCallType from letta.schemas.letta_message import LettaMessage from letta.schemas.letta_message_content import ReasoningContent, RedactedReasoningContent, TextContent 
from letta.schemas.llm_config import LLMConfig -from letta.schemas.openai.chat_completion_response import ChatCompletionResponse, ToolCall +from letta.schemas.openai.chat_completion_response import ChatCompletionResponse, ChoiceLogprobs, ToolCall from letta.schemas.usage import LettaUsageStatistics from letta.schemas.user import User from letta.services.telemetry_manager import TelemetryManager @@ -24,6 +25,7 @@ class LettaLLMAdapter(ABC): self, llm_client: LLMClientBase, llm_config: LLMConfig, + call_type: LLMCallType, agent_id: str | None = None, agent_tags: list[str] | None = None, run_id: str | None = None, @@ -32,6 +34,7 @@ class LettaLLMAdapter(ABC): ) -> None: self.llm_client: LLMClientBase = llm_client self.llm_config: LLMConfig = llm_config + self.call_type: LLMCallType = call_type self.agent_id: str | None = agent_id self.agent_tags: list[str] | None = agent_tags self.run_id: str | None = run_id @@ -45,9 +48,14 @@ class LettaLLMAdapter(ABC): self.content: list[TextContent | ReasoningContent | RedactedReasoningContent] | None = None self.tool_call: ToolCall | None = None self.tool_calls: list[ToolCall] = [] + self.logprobs: ChoiceLogprobs | None = None + # SGLang native endpoint data (for multi-turn RL training) + self.output_ids: list[int] | None = None + self.output_token_logprobs: list[list[float]] | None = None self.usage: LettaUsageStatistics = LettaUsageStatistics() self.telemetry_manager: TelemetryManager = TelemetryManager() self.llm_request_finish_timestamp_ns: int | None = None + self._finish_reason: str | None = None @abstractmethod async def invoke_llm( @@ -85,6 +93,8 @@ class LettaLLMAdapter(ABC): Returns: str | None: The finish_reason if available, None otherwise """ + if self._finish_reason is not None: + return self._finish_reason if self.chat_completions_response and self.chat_completions_response.choices: return self.chat_completions_response.choices[0].finish_reason return None diff --git a/letta/adapters/letta_llm_request_adapter.py 
b/letta/adapters/letta_llm_request_adapter.py index 5e472a35..221e2a76 100644 --- a/letta/adapters/letta_llm_request_adapter.py +++ b/letta/adapters/letta_llm_request_adapter.py @@ -2,7 +2,7 @@ from typing import AsyncGenerator from letta.adapters.letta_llm_adapter import LettaLLMAdapter from letta.helpers.datetime_helpers import get_utc_timestamp_ns -from letta.otel.tracing import log_attributes, log_event, safe_json_dumps, trace_method +from letta.otel.tracing import log_attributes, safe_json_dumps, trace_method from letta.schemas.letta_message import LettaMessage from letta.schemas.letta_message_content import OmittedReasoningContent, ReasoningContent, TextContent from letta.schemas.provider_trace import ProviderTrace @@ -66,7 +66,13 @@ class LettaLLMRequestAdapter(LettaLLMAdapter): self.reasoning_content = [OmittedReasoningContent()] elif self.chat_completions_response.choices[0].message.content: # Reasoning placed into content for legacy reasons - self.reasoning_content = [TextContent(text=self.chat_completions_response.choices[0].message.content)] + # Carry thought_signature on TextContent when ReasoningContent doesn't exist to hold it + self.reasoning_content = [ + TextContent( + text=self.chat_completions_response.choices[0].message.content, + signature=self.chat_completions_response.choices[0].message.reasoning_content_signature, + ) + ] else: # logger.info("No reasoning content found.") self.reasoning_content = None @@ -77,6 +83,9 @@ class LettaLLMRequestAdapter(LettaLLMAdapter): else: self.tool_call = None + # Extract logprobs if present + self.logprobs = self.chat_completions_response.choices[0].logprobs + # Extract usage statistics self.usage.step_count = 1 self.usage.completion_tokens = self.chat_completions_response.usage.completion_tokens @@ -127,6 +136,7 @@ class LettaLLMRequestAdapter(LettaLLMAdapter): agent_id=self.agent_id, agent_tags=self.agent_tags, run_id=self.run_id, + call_type=self.call_type, org_id=self.org_id, user_id=self.user_id, 
llm_config=self.llm_config.model_dump() if self.llm_config else None, diff --git a/letta/adapters/letta_llm_stream_adapter.py b/letta/adapters/letta_llm_stream_adapter.py index 4ae64e91..76fc6d65 100644 --- a/letta/adapters/letta_llm_stream_adapter.py +++ b/letta/adapters/letta_llm_stream_adapter.py @@ -1,16 +1,16 @@ from typing import AsyncGenerator from letta.adapters.letta_llm_adapter import LettaLLMAdapter +from letta.errors import LLMError from letta.helpers.datetime_helpers import get_utc_timestamp_ns from letta.interfaces.anthropic_streaming_interface import AnthropicStreamingInterface from letta.interfaces.openai_streaming_interface import OpenAIStreamingInterface from letta.llm_api.llm_client_base import LLMClientBase from letta.otel.tracing import log_attributes, safe_json_dumps, trace_method -from letta.schemas.enums import ProviderType +from letta.schemas.enums import LLMCallType, ProviderType from letta.schemas.letta_message import LettaMessage from letta.schemas.llm_config import LLMConfig from letta.schemas.provider_trace import ProviderTrace -from letta.schemas.usage import LettaUsageStatistics from letta.schemas.user import User from letta.settings import settings from letta.utils import safe_create_task @@ -30,13 +30,23 @@ class LettaLLMStreamAdapter(LettaLLMAdapter): self, llm_client: LLMClientBase, llm_config: LLMConfig, + call_type: LLMCallType, agent_id: str | None = None, agent_tags: list[str] | None = None, run_id: str | None = None, org_id: str | None = None, user_id: str | None = None, ) -> None: - super().__init__(llm_client, llm_config, agent_id=agent_id, agent_tags=agent_tags, run_id=run_id, org_id=org_id, user_id=user_id) + super().__init__( + llm_client, + llm_config, + call_type=call_type, + agent_id=agent_id, + agent_tags=agent_tags, + run_id=run_id, + org_id=org_id, + user_id=user_id, + ) self.interface: OpenAIStreamingInterface | AnthropicStreamingInterface | None = None async def invoke_llm( @@ -88,11 +98,23 @@ class 
LettaLLMStreamAdapter(LettaLLMAdapter): # Extract optional parameters # ttft_span = kwargs.get('ttft_span', None) + request_start_ns = get_utc_timestamp_ns() + # Start the streaming request (map provider errors to common LLMError types) try: stream = await self.llm_client.stream_async(request_data, self.llm_config) except Exception as e: - raise self.llm_client.handle_llm_error(e) + self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns() + latency_ms = int((self.llm_request_finish_timestamp_ns - request_start_ns) / 1_000_000) + await self.llm_client.log_provider_trace_async( + request_data=request_data, + response_json=None, + llm_config=self.llm_config, + latency_ms=latency_ms, + error_msg=str(e), + error_type=type(e).__name__, + ) + raise self.llm_client.handle_llm_error(e, llm_config=self.llm_config) # Process the stream and yield chunks immediately for TTFT # Wrap in error handling to convert provider errors to common LLMError types @@ -101,7 +123,19 @@ class LettaLLMStreamAdapter(LettaLLMAdapter): # Yield each chunk immediately as it arrives yield chunk except Exception as e: - raise self.llm_client.handle_llm_error(e) + self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns() + latency_ms = int((self.llm_request_finish_timestamp_ns - request_start_ns) / 1_000_000) + await self.llm_client.log_provider_trace_async( + request_data=request_data, + response_json=None, + llm_config=self.llm_config, + latency_ms=latency_ms, + error_msg=str(e), + error_type=type(e).__name__, + ) + if isinstance(e, LLMError): + raise + raise self.llm_client.handle_llm_error(e, llm_config=self.llm_config) # After streaming completes, extract the accumulated data self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns() @@ -109,7 +143,7 @@ class LettaLLMStreamAdapter(LettaLLMAdapter): # Extract tool call from the interface try: self.tool_call = self.interface.get_tool_call_object() - except ValueError as e: + except ValueError: # No tool call, handle upstream 
self.tool_call = None @@ -183,6 +217,7 @@ class LettaLLMStreamAdapter(LettaLLMAdapter): agent_id=self.agent_id, agent_tags=self.agent_tags, run_id=self.run_id, + call_type=self.call_type, org_id=self.org_id, user_id=self.user_id, llm_config=self.llm_config.model_dump() if self.llm_config else None, diff --git a/letta/adapters/sglang_native_adapter.py b/letta/adapters/sglang_native_adapter.py new file mode 100644 index 00000000..cab8b267 --- /dev/null +++ b/letta/adapters/sglang_native_adapter.py @@ -0,0 +1,515 @@ +""" +SGLang Native Adapter for multi-turn RL training. + +This adapter uses SGLang's native /generate endpoint instead of the OpenAI-compatible +endpoint to get token IDs and per-token logprobs, which are essential for proper +multi-turn RL training with loss masking. + +Uses HuggingFace tokenizer's apply_chat_template() for proper tool formatting. +""" + +import json +import re +import time +import uuid +from typing import Any, AsyncGenerator, Optional + +from letta.adapters.simple_llm_request_adapter import SimpleLLMRequestAdapter +from letta.helpers.datetime_helpers import get_utc_timestamp_ns +from letta.llm_api.sglang_native_client import SGLangNativeClient +from letta.log import get_logger +from letta.schemas.letta_message import LettaMessage +from letta.schemas.letta_message_content import TextContent +from letta.schemas.openai.chat_completion_response import ( + ChatCompletionResponse, + ChatCompletionTokenLogprob, + Choice, + ChoiceLogprobs, + FunctionCall, + Message as ChoiceMessage, + ToolCall, + UsageStatistics, +) + +logger = get_logger(__name__) + +# Global tokenizer cache +_tokenizer_cache: dict[str, Any] = {} + + +class SGLangNativeAdapter(SimpleLLMRequestAdapter): + """ + Adapter that uses SGLang's native /generate endpoint for multi-turn RL training. 
+ + Key differences from SimpleLLMRequestAdapter: + - Uses /generate instead of /v1/chat/completions + - Returns output_ids (token IDs) in addition to text + - Returns output_token_logprobs with [logprob, token_id] pairs + - Formats tools into prompt and parses tool calls from response + + These are essential for building accurate loss masks in multi-turn training. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._sglang_client: Optional[SGLangNativeClient] = None + self._tokenizer: Any = None + + def _get_tokenizer(self) -> Any: + """Get or create tokenizer for the model.""" + global _tokenizer_cache + + # Get model name from llm_config + model_name = self.llm_config.model + if not model_name: + logger.warning("No model name in llm_config, cannot load tokenizer") + return None + + # Check cache + if model_name in _tokenizer_cache: + return _tokenizer_cache[model_name] + + try: + from transformers import AutoTokenizer + + logger.info(f"Loading tokenizer for model: {model_name}") + tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) + _tokenizer_cache[model_name] = tokenizer + return tokenizer + except ImportError: + logger.warning("transformers not installed, falling back to manual formatting") + return None + except Exception as e: + logger.warning(f"Failed to load tokenizer: {e}, falling back to manual formatting") + return None + + def _get_sglang_client(self) -> SGLangNativeClient: + """Get or create SGLang native client.""" + if self._sglang_client is None: + # Get base URL from llm_config, removing /v1 suffix if present + base_url = self.llm_config.model_endpoint or "" + # SGLang local instances typically don't need API key + self._sglang_client = SGLangNativeClient( + base_url=base_url, + api_key=None, + ) + return self._sglang_client + + def _format_tools_for_prompt(self, tools: list) -> str: + """ + Format tools in Qwen3 chat template format for the system prompt. 
+ + This matches the exact format produced by Qwen3's tokenizer.apply_chat_template() + with tools parameter. + """ + if not tools: + return "" + + # Format each tool as JSON (matching Qwen3 template exactly) + tool_jsons = [] + for tool in tools: + # Handle both dict and object formats + if isinstance(tool, dict): + # Already in OpenAI format + tool_jsons.append(json.dumps(tool)) + else: + # Convert object to dict + tool_dict = { + "type": "function", + "function": { + "name": getattr(getattr(tool, "function", tool), "name", ""), + "description": getattr(getattr(tool, "function", tool), "description", ""), + "parameters": getattr(getattr(tool, "function", tool), "parameters", {}), + }, + } + tool_jsons.append(json.dumps(tool_dict)) + + # Use exact Qwen3 format + tools_section = ( + "\n\n# Tools\n\n" + "You may call one or more functions to assist with the user query.\n\n" + "You are provided with function signatures within XML tags:\n" + "\n" + "\n".join(tool_jsons) + "\n" + "\n\n" + "For each function call, return a json object with function name and arguments within XML tags:\n" + "\n" + '{"name": , "arguments": }\n' + "" + ) + + return tools_section + + def _convert_messages_to_openai_format(self, messages: list) -> list[dict]: + """Convert Letta Message objects to OpenAI-style message dicts.""" + openai_messages = [] + + for msg in messages: + # Handle both dict and Pydantic Message objects + if hasattr(msg, "role"): + role = msg.role + content = msg.content if hasattr(msg, "content") else "" + # Handle content that might be a list of content parts + if isinstance(content, list): + content = " ".join([c.text if hasattr(c, "text") else str(c) for c in content]) + elif content is None: + content = "" + tool_calls = getattr(msg, "tool_calls", None) + tool_call_id = getattr(msg, "tool_call_id", None) + name = getattr(msg, "name", None) + else: + role = msg.get("role", "user") + content = msg.get("content", "") + tool_calls = msg.get("tool_calls", None) + 
tool_call_id = msg.get("tool_call_id", None) + name = msg.get("name", None) + + openai_msg = {"role": role, "content": content} + + if tool_calls: + # Convert tool calls to OpenAI format + openai_tool_calls = [] + for tc in tool_calls: + if hasattr(tc, "function"): + tc_dict = { + "id": getattr(tc, "id", f"call_{uuid.uuid4().hex[:8]}"), + "type": "function", + "function": { + "name": tc.function.name, + "arguments": tc.function.arguments + if isinstance(tc.function.arguments, str) + else json.dumps(tc.function.arguments), + }, + } + else: + tc_dict = { + "id": tc.get("id", f"call_{uuid.uuid4().hex[:8]}"), + "type": "function", + "function": tc.get("function", {}), + } + openai_tool_calls.append(tc_dict) + openai_msg["tool_calls"] = openai_tool_calls + + if tool_call_id: + openai_msg["tool_call_id"] = tool_call_id + + if name and role == "tool": + openai_msg["name"] = name + + openai_messages.append(openai_msg) + + return openai_messages + + def _convert_tools_to_openai_format(self, tools: list) -> list[dict]: + """Convert tools to OpenAI format for tokenizer.""" + openai_tools = [] + for tool in tools: + if isinstance(tool, dict): + # Already a dict, ensure it's in the right format + if "function" in tool: + openai_tools.append(tool) + else: + # Might be the function directly + openai_tools.append({"type": "function", "function": tool}) + else: + # Convert object to dict + func = getattr(tool, "function", tool) + tool_dict = { + "type": "function", + "function": { + "name": getattr(func, "name", ""), + "description": getattr(func, "description", ""), + "parameters": getattr(func, "parameters", {}), + }, + } + openai_tools.append(tool_dict) + return openai_tools + + def _format_messages_to_text(self, messages: list, tools: list) -> str: + """ + Format messages to text using tokenizer's apply_chat_template if available. + + Falls back to manual formatting if tokenizer is not available. 
+ """ + tokenizer = self._get_tokenizer() + + if tokenizer is not None: + # Use tokenizer's apply_chat_template for proper formatting + openai_messages = self._convert_messages_to_openai_format(messages) + openai_tools = self._convert_tools_to_openai_format(tools) if tools else None + + try: + formatted = tokenizer.apply_chat_template( + openai_messages, + tokenize=False, + add_generation_prompt=True, + tools=openai_tools, + ) + logger.debug(f"Formatted prompt using tokenizer ({len(formatted)} chars)") + return formatted + except Exception as e: + logger.warning(f"apply_chat_template failed: {e}, falling back to manual formatting") + + # Fallback to manual formatting + return self._format_messages_to_text_manual(messages, tools) + + def _format_messages_to_text_manual(self, messages: list, tools: list) -> str: + """Manual fallback formatting for when tokenizer is not available.""" + formatted_parts = [] + tools_section = self._format_tools_for_prompt(tools) + + for msg in messages: + # Handle both dict and Pydantic Message objects + if hasattr(msg, "role"): + role = msg.role + content = msg.content if hasattr(msg, "content") else "" + if isinstance(content, list): + content = " ".join([c.text if hasattr(c, "text") else str(c) for c in content]) + elif content is None: + content = "" + tool_calls = getattr(msg, "tool_calls", None) + else: + role = msg.get("role", "user") + content = msg.get("content", "") + tool_calls = msg.get("tool_calls", None) + + if role == "system": + system_content = content + tools_section if tools_section else content + formatted_parts.append(f"<|im_start|>system\n{system_content}<|im_end|>") + tools_section = "" + elif role == "user": + formatted_parts.append(f"<|im_start|>user\n{content}<|im_end|>") + elif role == "assistant": + if tool_calls: + tc_parts = [] + for tc in tool_calls: + if hasattr(tc, "function"): + tc_name = tc.function.name + tc_args = tc.function.arguments + else: + tc_name = tc.get("function", {}).get("name", "") + 
tc_args = tc.get("function", {}).get("arguments", "{}") + + if isinstance(tc_args, str): + try: + tc_args = json.loads(tc_args) + except Exception: + pass + + tc_parts.append(f'\n{{"name": "{tc_name}", "arguments": {json.dumps(tc_args)}}}\n') + + assistant_content = content + "\n" + "\n".join(tc_parts) if content else "\n".join(tc_parts) + formatted_parts.append(f"<|im_start|>assistant\n{assistant_content}<|im_end|>") + elif content: + formatted_parts.append(f"<|im_start|>assistant\n{content}<|im_end|>") + elif role == "tool": + formatted_parts.append(f"<|im_start|>user\n\n{content}\n<|im_end|>") + + formatted_parts.append("<|im_start|>assistant\n") + return "\n".join(formatted_parts) + + def _parse_tool_calls(self, text: str) -> list[ToolCall]: + """ + Parse tool calls from response text. + + Looks for patterns like: + + {"name": "tool_name", "arguments": {...}} + + """ + tool_calls = [] + + # Find all tool_call blocks + pattern = r"\s*(\{.*?\})\s*" + matches = re.findall(pattern, text, re.DOTALL) + + for match in matches: + try: + tc_data = json.loads(match) + name = tc_data.get("name", "") + arguments = tc_data.get("arguments", {}) + + if isinstance(arguments, dict): + arguments = json.dumps(arguments) + + tool_call = ToolCall( + id=f"call_{uuid.uuid4().hex[:8]}", + type="function", + function=FunctionCall( + name=name, + arguments=arguments, + ), + ) + tool_calls.append(tool_call) + except json.JSONDecodeError as e: + logger.warning(f"Failed to parse tool call JSON: {e}") + continue + + return tool_calls + + def _extract_content_without_tool_calls(self, text: str) -> str: + """Extract content from response, removing tool_call blocks.""" + # Remove tool_call blocks + cleaned = re.sub(r".*?", "", text, flags=re.DOTALL) + # Clean up whitespace + cleaned = cleaned.strip() + return cleaned + + async def invoke_llm( + self, + request_data: dict, + messages: list, + tools: list, + use_assistant_message: bool, + requires_approval_tools: list[str] = [], + step_id: str | 
None = None, + actor: str | None = None, + ) -> AsyncGenerator[LettaMessage | None, None]: + """ + Execute LLM request using SGLang native endpoint. + + This method: + 1. Formats messages and tools to text using chat template + 2. Calls SGLang native /generate endpoint + 3. Extracts output_ids and output_token_logprobs + 4. Parses tool calls from response + 5. Converts response to standard format + """ + self.request_data = request_data + + # Get sampling params from request_data + sampling_params = { + "temperature": request_data.get("temperature", 0.7), + "max_new_tokens": request_data.get("max_tokens", 4096), + "top_p": request_data.get("top_p", 0.9), + } + + # Format messages to text (includes tools in prompt) + text_input = self._format_messages_to_text(messages, tools) + + # Call SGLang native endpoint + client = self._get_sglang_client() + + try: + response = await client.generate( + text=text_input, + sampling_params=sampling_params, + return_logprob=True, + ) + except Exception as e: + logger.error(f"SGLang native endpoint error: {e}") + raise + + self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns() + + # Store native response data + self.response_data = response + + # Extract SGLang native data + self.output_ids = response.get("output_ids") + # output_token_logprobs is inside meta_info + meta_info = response.get("meta_info", {}) + self.output_token_logprobs = meta_info.get("output_token_logprobs") + + # Extract text response + text_response = response.get("text", "") + + # Remove trailing end token if present + if text_response.endswith("<|im_end|>"): + text_response = text_response[:-10] + + # Parse tool calls from response + parsed_tool_calls = self._parse_tool_calls(text_response) + + # Extract content (text without tool_call blocks) + content_text = self._extract_content_without_tool_calls(text_response) + + # Determine finish reason + meta_info = response.get("meta_info", {}) + finish_reason_info = meta_info.get("finish_reason", {}) + if 
isinstance(finish_reason_info, dict): + finish_reason = finish_reason_info.get("type", "stop") + else: + finish_reason = "stop" + + # If we have tool calls, set finish_reason to tool_calls + if parsed_tool_calls: + finish_reason = "tool_calls" + + # Convert to standard ChatCompletionResponse format for compatibility + # Build logprobs in OpenAI format from SGLang format + logprobs_content = None + if self.output_token_logprobs: + logprobs_content = [] + for i, lp_data in enumerate(self.output_token_logprobs): + # SGLang format: [logprob, token_id, top_logprob] + logprob = lp_data[0] if len(lp_data) > 0 else 0.0 + token_id = lp_data[1] if len(lp_data) > 1 else 0 + logprobs_content.append( + ChatCompletionTokenLogprob( + token=str(token_id), + logprob=logprob, + bytes=None, + top_logprobs=[], + ) + ) + + choice_logprobs = ChoiceLogprobs(content=logprobs_content) if logprobs_content else None + + # Build chat completion response + prompt_tokens = meta_info.get("prompt_tokens", 0) + completion_tokens = len(self.output_ids) if self.output_ids else 0 + + self.chat_completions_response = ChatCompletionResponse( + id=meta_info.get("id", "sglang-native"), + created=int(time.time()), + choices=[ + Choice( + finish_reason=finish_reason, + index=0, + message=ChoiceMessage( + role="assistant", + content=content_text if content_text else None, + tool_calls=parsed_tool_calls if parsed_tool_calls else None, + ), + logprobs=choice_logprobs, + ) + ], + usage=UsageStatistics( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, + ), + ) + + # Extract content + if content_text: + self.content = [TextContent(text=content_text)] + else: + self.content = None + + # No reasoning content from native endpoint + self.reasoning_content = None + + # Set tool calls + self.tool_calls = parsed_tool_calls + self.tool_call = parsed_tool_calls[0] if parsed_tool_calls else None + + # Set logprobs + self.logprobs = choice_logprobs + + 
# Extract usage statistics + self.usage.step_count = 1 + self.usage.completion_tokens = completion_tokens + self.usage.prompt_tokens = prompt_tokens + self.usage.total_tokens = prompt_tokens + completion_tokens + + self.log_provider_trace(step_id=step_id, actor=actor) + + logger.info( + f"SGLang native response: {len(self.output_ids or [])} tokens, " + f"{len(self.output_token_logprobs or [])} logprobs, " + f"{len(parsed_tool_calls)} tool calls" + ) + + yield None + return diff --git a/letta/adapters/simple_llm_request_adapter.py b/letta/adapters/simple_llm_request_adapter.py index cf2dc741..f67e7dc9 100644 --- a/letta/adapters/simple_llm_request_adapter.py +++ b/letta/adapters/simple_llm_request_adapter.py @@ -1,7 +1,9 @@ from typing import AsyncGenerator from letta.adapters.letta_llm_request_adapter import LettaLLMRequestAdapter +from letta.errors import LLMError from letta.helpers.datetime_helpers import get_utc_timestamp_ns +from letta.schemas.enums import LLMCallType from letta.schemas.letta_message import LettaMessage from letta.schemas.letta_message_content import OmittedReasoningContent, ReasoningContent, TextContent from letta.schemas.usage import normalize_cache_tokens, normalize_reasoning_tokens @@ -45,7 +47,7 @@ class SimpleLLMRequestAdapter(LettaLLMRequestAdapter): agent_id=self.agent_id, agent_tags=self.agent_tags, run_id=self.run_id, - call_type="agent_step", + call_type=LLMCallType.agent_step, org_id=self.org_id, user_id=self.user_id, llm_config=self.llm_config.model_dump() if self.llm_config else None, @@ -53,7 +55,9 @@ class SimpleLLMRequestAdapter(LettaLLMRequestAdapter): try: self.response_data = await self.llm_client.request_async_with_telemetry(request_data, self.llm_config) except Exception as e: - raise self.llm_client.handle_llm_error(e) + if isinstance(e, LLMError): + raise + raise self.llm_client.handle_llm_error(e, llm_config=self.llm_config) self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns() @@ -80,7 +84,12 @@ class 
SimpleLLMRequestAdapter(LettaLLMRequestAdapter): if self.chat_completions_response.choices[0].message.content: # NOTE: big difference - 'content' goes into 'content' # Reasoning placed into content for legacy reasons - self.content = [TextContent(text=self.chat_completions_response.choices[0].message.content)] + # Carry thought_signature on TextContent when ReasoningContent doesn't exist to hold it + # (e.g. Gemini 2.5 Flash with include_thoughts=False still returns thought_signature) + orphan_sig = ( + self.chat_completions_response.choices[0].message.reasoning_content_signature if not self.reasoning_content else None + ) + self.content = [TextContent(text=self.chat_completions_response.choices[0].message.content, signature=orphan_sig)] else: self.content = None @@ -93,6 +102,9 @@ class SimpleLLMRequestAdapter(LettaLLMRequestAdapter): self.tool_calls = list(tool_calls) self.tool_call = self.tool_calls[0] if self.tool_calls else None + # Extract logprobs if present + self.logprobs = self.chat_completions_response.choices[0].logprobs + # Extract usage statistics self.usage.step_count = 1 self.usage.completion_tokens = self.chat_completions_response.usage.completion_tokens diff --git a/letta/adapters/simple_llm_stream_adapter.py b/letta/adapters/simple_llm_stream_adapter.py index 2313ff2b..26c054fd 100644 --- a/letta/adapters/simple_llm_stream_adapter.py +++ b/letta/adapters/simple_llm_stream_adapter.py @@ -1,7 +1,7 @@ -import json from typing import AsyncGenerator, List from letta.adapters.letta_llm_stream_adapter import LettaLLMStreamAdapter +from letta.errors import LLMError from letta.log import get_logger logger = get_logger(__name__) @@ -70,6 +70,9 @@ class SimpleLLMStreamAdapter(LettaLLMStreamAdapter): # Store request data self.request_data = request_data + # Track request start time for latency calculation + request_start_ns = get_utc_timestamp_ns() + # Get cancellation event for this run to enable graceful cancellation (before branching) cancellation_event = 
get_cancellation_event_for_run(self.run_id) if self.run_id else None @@ -138,7 +141,19 @@ class SimpleLLMStreamAdapter(LettaLLMStreamAdapter): else: stream = await self.llm_client.stream_async(request_data, self.llm_config) except Exception as e: - raise self.llm_client.handle_llm_error(e) + self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns() + latency_ms = int((self.llm_request_finish_timestamp_ns - request_start_ns) / 1_000_000) + await self.llm_client.log_provider_trace_async( + request_data=request_data, + response_json=None, + llm_config=self.llm_config, + latency_ms=latency_ms, + error_msg=str(e), + error_type=type(e).__name__, + ) + if isinstance(e, LLMError): + raise + raise self.llm_client.handle_llm_error(e, llm_config=self.llm_config) # Process the stream and yield chunks immediately for TTFT try: @@ -146,8 +161,19 @@ class SimpleLLMStreamAdapter(LettaLLMStreamAdapter): # Yield each chunk immediately as it arrives yield chunk except Exception as e: - # Map provider-specific errors during streaming to common LLMError types - raise self.llm_client.handle_llm_error(e) + self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns() + latency_ms = int((self.llm_request_finish_timestamp_ns - request_start_ns) / 1_000_000) + await self.llm_client.log_provider_trace_async( + request_data=request_data, + response_json=None, + llm_config=self.llm_config, + latency_ms=latency_ms, + error_msg=str(e), + error_type=type(e).__name__, + ) + if isinstance(e, LLMError): + raise + raise self.llm_client.handle_llm_error(e, llm_config=self.llm_config) # After streaming completes, extract the accumulated data self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns() @@ -172,6 +198,22 @@ class SimpleLLMStreamAdapter(LettaLLMStreamAdapter): # Store any additional data from the interface self.message_id = self.interface.letta_message_id + # Populate finish_reason for downstream continuation logic. 
+ # In Responses streaming, max_output_tokens is expressed via incomplete_details.reason. + if hasattr(self.interface, "final_response") and self.interface.final_response is not None: + resp = self.interface.final_response + incomplete_details = getattr(resp, "incomplete_details", None) + incomplete_reason = getattr(incomplete_details, "reason", None) if incomplete_details else None + if incomplete_reason == "max_output_tokens": + self._finish_reason = "length" + elif incomplete_reason == "content_filter": + self._finish_reason = "content_filter" + elif incomplete_reason is not None: + # Unknown incomplete reason — preserve it as-is for diagnostics + self._finish_reason = incomplete_reason + elif getattr(resp, "status", None) == "completed": + self._finish_reason = "stop" + # Log request and response data self.log_provider_trace(step_id=step_id, actor=actor) @@ -232,6 +274,7 @@ class SimpleLLMStreamAdapter(LettaLLMStreamAdapter): agent_id=self.agent_id, agent_tags=self.agent_tags, run_id=self.run_id, + call_type=self.call_type, org_id=self.org_id, user_id=self.user_id, llm_config=self.llm_config.model_dump() if self.llm_config else None, diff --git a/letta/agents/base_agent.py b/letta/agents/base_agent.py index e072146d..326dc60a 100644 --- a/letta/agents/base_agent.py +++ b/letta/agents/base_agent.py @@ -123,32 +123,17 @@ class BaseAgent(ABC): curr_system_message = in_context_messages[0] curr_system_message_text = curr_system_message.content[0].text - # extract the dynamic section that includes memory blocks, tool rules, and directories - # this avoids timestamp comparison issues - def extract_dynamic_section(text): - start_marker = "" - end_marker = "" - - start_idx = text.find(start_marker) - end_idx = text.find(end_marker) - - if start_idx != -1 and end_idx != -1: - return text[start_idx:end_idx] - return text # fallback to full text if markers not found - - curr_dynamic_section = extract_dynamic_section(curr_system_message_text) - - # generate just the memory 
string with current state for comparison + # generate memory string with current state for comparison curr_memory_str = agent_state.memory.compile( tool_usage_rules=tool_constraint_block, sources=agent_state.sources, max_files_open=agent_state.max_files_open, llm_config=agent_state.llm_config, ) - new_dynamic_section = extract_dynamic_section(curr_memory_str) - # compare just the dynamic sections (memory blocks, tool rules, directories) - if curr_dynamic_section == new_dynamic_section: + system_prompt_changed = agent_state.system not in curr_system_message_text + memory_changed = curr_memory_str not in curr_system_message_text + if (not system_prompt_changed) and (not memory_changed): logger.debug( f"Memory and sources haven't changed for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name}), skipping system prompt rebuild" ) @@ -183,7 +168,7 @@ class BaseAgent(ABC): actor=self.actor, project_id=agent_state.project_id, ) - return [new_system_message] + in_context_messages[1:] + return [new_system_message, *in_context_messages[1:]] else: return in_context_messages diff --git a/letta/agents/base_agent_v2.py b/letta/agents/base_agent_v2.py index d66f6f04..b6fe89ce 100644 --- a/letta/agents/base_agent_v2.py +++ b/letta/agents/base_agent_v2.py @@ -25,6 +25,11 @@ class BaseAgentV2(ABC): self.actor = actor self.logger = get_logger(agent_state.id) + @property + def agent_id(self) -> str: + """Return the agent ID for backward compatibility with code expecting self.agent_id.""" + return self.agent_state.id + @abstractmethod async def build_request( self, @@ -46,6 +51,7 @@ class BaseAgentV2(ABC): include_return_message_types: list[MessageType] | None = None, request_start_timestamp_ns: int | None = None, client_tools: list["ClientToolSchema"] | None = None, + include_compaction_messages: bool = False, # Not used in V2, but accepted for API compatibility ) -> LettaResponse: """ Execute the agent loop in blocking mode, returning all messages at once. 
@@ -53,6 +59,7 @@ class BaseAgentV2(ABC): Args: client_tools: Optional list of client-side tools. When called, execution pauses for client to provide tool returns. + include_compaction_messages: Not used in V2, but accepted for API compatibility. """ raise NotImplementedError @@ -66,8 +73,9 @@ class BaseAgentV2(ABC): use_assistant_message: bool = True, include_return_message_types: list[MessageType] | None = None, request_start_timestamp_ns: int | None = None, - conversation_id: str | None = None, + conversation_id: str | None = None, client_tools: list["ClientToolSchema"] | None = None, + include_compaction_messages: bool = False, # Not used in V2, but accepted for API compatibility ) -> AsyncGenerator[LettaMessage | LegacyLettaMessage | MessageStreamStatus, None]: """ Execute the agent loop in streaming mode, yielding chunks as they become available. @@ -78,5 +86,6 @@ class BaseAgentV2(ABC): Args: client_tools: Optional list of client-side tools. When called, execution pauses for client to provide tool returns. + include_compaction_messages: Not used in V2, but accepted for API compatibility. 
""" raise NotImplementedError diff --git a/letta/agents/ephemeral_summary_agent.py b/letta/agents/ephemeral_summary_agent.py index 3e990c9e..86b2b90a 100644 --- a/letta/agents/ephemeral_summary_agent.py +++ b/letta/agents/ephemeral_summary_agent.py @@ -8,7 +8,7 @@ from letta.log import get_logger from letta.orm.errors import NoResultFound from letta.prompts.gpt_system import get_system_text from letta.schemas.block import Block, BlockUpdate -from letta.schemas.enums import MessageRole +from letta.schemas.enums import LLMCallType, MessageRole from letta.schemas.letta_message_content import TextContent from letta.schemas.message import Message, MessageCreate from letta.schemas.user import User @@ -79,7 +79,7 @@ class EphemeralSummaryAgent(BaseAgent): content=[TextContent(text=get_system_text("summary_system_prompt"))], ) messages = await convert_message_creates_to_messages( - message_creates=[system_message_create] + input_messages, + message_creates=[system_message_create, *input_messages], agent_id=self.agent_id, timezone=agent_state.timezone, run_id=None, # TODO: add this @@ -92,7 +92,7 @@ class EphemeralSummaryAgent(BaseAgent): telemetry_manager=TelemetryManager(), agent_id=self.agent_id, agent_tags=agent_state.tags, - call_type="summarization", + call_type=LLMCallType.summarization, ) response_data = await llm_client.request_async_with_telemetry(request_data, agent_state.llm_config) response = await llm_client.convert_response_to_chat_completion(response_data, messages, agent_state.llm_config) diff --git a/letta/agents/helpers.py b/letta/agents/helpers.py index 2ce15c0f..28f5d304 100644 --- a/letta/agents/helpers.py +++ b/letta/agents/helpers.py @@ -1,10 +1,12 @@ import json -import uuid import xml.etree.ElementTree as ET -from typing import Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from uuid import UUID, uuid4 -from letta.errors import PendingApprovalError +if TYPE_CHECKING: + from letta.schemas.tool 
import Tool + +from letta.errors import LettaError, PendingApprovalError from letta.helpers import ToolRulesSolver from letta.helpers.datetime_helpers import get_utc_time from letta.log import get_logger @@ -233,6 +235,11 @@ async def _prepare_in_context_messages_no_persist_async( current_in_context_messages = [system_message] else: # Default mode: load messages from agent_state.message_ids + if not agent_state.message_ids: + raise LettaError( + message=f"Agent {agent_state.id} has no in-context messages. " + "This typically means the agent's system message was not initialized correctly.", + ) if agent_state.message_buffer_autoclear: # If autoclear is enabled, only include the most recent system message (usually at index 0) current_in_context_messages = [ @@ -242,6 +249,14 @@ async def _prepare_in_context_messages_no_persist_async( # Otherwise, include the full list of messages by ID for context current_in_context_messages = await message_manager.get_messages_by_ids_async(message_ids=agent_state.message_ids, actor=actor) + # Convert ToolReturnCreate to ApprovalCreate for unified processing + if input_messages[0].type == "tool_return": + tool_return_msg = input_messages[0] + input_messages = [ + ApprovalCreate(approvals=tool_return_msg.tool_returns), + *input_messages[1:], + ] + # Check for approval-related message validation if input_messages[0].type == "approval": # User is trying to send an approval response @@ -254,12 +269,31 @@ async def _prepare_in_context_messages_no_persist_async( for msg in reversed(recent_messages): if msg.role == "tool" and validate_persisted_tool_call_ids(msg, input_messages[0]): logger.info( - f"Idempotency check: Found matching tool return in recent history. " + f"Idempotency check: Found matching tool return in recent in-context history. 
" f"tool_returns={msg.tool_returns}, approval_response.approvals={input_messages[0].approvals}" ) approval_already_processed = True break + # If not found in context and summarization just happened, check full history + non_system_summary_messages = [ + m for m in current_in_context_messages if m.role not in (MessageRole.system, MessageRole.summary) + ] + if not approval_already_processed and len(non_system_summary_messages) == 0: + last_tool_messages = await message_manager.list_messages( + actor=actor, + agent_id=agent_state.id, + roles=[MessageRole.tool], + limit=1, + ascending=False, # Most recent first + ) + if len(last_tool_messages) == 1 and validate_persisted_tool_call_ids(last_tool_messages[0], input_messages[0]): + logger.info( + f"Idempotency check: Found matching tool return in full history (post-compaction). " + f"tool_returns={last_tool_messages[0].tool_returns}, approval_response.approvals={input_messages[0].approvals}" + ) + approval_already_processed = True + if approval_already_processed: # Approval already handled, just process follow-up messages if any or manually inject keep-alive message keep_alive_messages = input_messages[1:] or [ diff --git a/letta/agents/letta_agent.py b/letta/agents/letta_agent.py index 3b359c72..be6a378b 100644 --- a/letta/agents/letta_agent.py +++ b/letta/agents/letta_agent.py @@ -13,14 +13,13 @@ from letta.agents.ephemeral_summary_agent import EphemeralSummaryAgent from letta.agents.helpers import ( _build_rule_violation_result, _create_letta_response, - _load_last_function_response, _pop_heartbeat, _prepare_in_context_messages_no_persist_async, _safe_load_tool_call_str, generate_step_id, ) from letta.constants import DEFAULT_MAX_STEPS, NON_USER_MSG_PREFIX, REQUEST_HEARTBEAT_PARAM -from letta.errors import ContextWindowExceededError +from letta.errors import ContextWindowExceededError, LLMError from letta.helpers import ToolRulesSolver from letta.helpers.datetime_helpers import AsyncTimer, get_utc_time, 
get_utc_timestamp_ns, ns_to_ms from letta.helpers.reasoning_helper import scrub_inner_thoughts_from_messages @@ -35,7 +34,7 @@ from letta.otel.context import get_ctx_attributes from letta.otel.metric_registry import MetricRegistry from letta.otel.tracing import log_event, trace_method, tracer from letta.schemas.agent import AgentState, UpdateAgent -from letta.schemas.enums import JobStatus, ProviderType, StepStatus, ToolType +from letta.schemas.enums import JobStatus, LLMCallType, ProviderType, StepStatus, ToolType from letta.schemas.letta_message import MessageType from letta.schemas.letta_message_content import OmittedReasoningContent, ReasoningContent, RedactedReasoningContent, TextContent from letta.schemas.letta_response import LettaResponse @@ -49,7 +48,6 @@ from letta.schemas.openai.chat_completion_response import ( UsageStatisticsCompletionTokenDetails, UsageStatisticsPromptTokenDetails, ) -from letta.schemas.provider_trace import ProviderTrace from letta.schemas.step import StepProgression from letta.schemas.step_metrics import StepMetrics from letta.schemas.tool_execution_result import ToolExecutionResult @@ -294,6 +292,7 @@ class LettaAgent(BaseAgent): agent_step_span.set_attributes({"step_id": step_id}) step_progression = StepProgression.START + caught_exception = None should_continue = False step_metrics = StepMetrics(id=step_id) # Initialize metrics tracking @@ -312,6 +311,7 @@ class LettaAgent(BaseAgent): step_id=step_id, project_id=agent_state.project_id, status=StepStatus.PENDING, + model_handle=agent_state.llm_config.handle, ) # Only use step_id in messages if step was actually created effective_step_id = step_id if logged_step else None @@ -370,8 +370,12 @@ class LettaAgent(BaseAgent): elif response.choices[0].message.omitted_reasoning_content: reasoning = [OmittedReasoningContent()] elif response.choices[0].message.content: + # Carry thought_signature on TextContent when ReasoningContent doesn't exist to hold it reasoning = [ - 
TextContent(text=response.choices[0].message.content) + TextContent( + text=response.choices[0].message.content, + signature=response.choices[0].message.reasoning_content_signature, + ) ] # reasoning placed into content for legacy reasons else: self.logger.info("No reasoning content found.") @@ -409,24 +413,6 @@ class LettaAgent(BaseAgent): agent_step_span.add_event(name="step_ms", attributes={"duration_ms": ns_to_ms(step_ns)}) agent_step_span.end() - # Log LLM Trace - if settings.track_provider_trace: - await self.telemetry_manager.create_provider_trace_async( - actor=self.actor, - provider_trace=ProviderTrace( - request_json=request_data, - response_json=response_data, - step_id=step_id, - agent_id=self.agent_id, - agent_tags=agent_state.tags, - run_id=self.current_run_id, - org_id=self.actor.organization_id, - user_id=self.actor.id, - llm_config=self.agent_state.llm_config.model_dump() if self.agent_state.llm_config else None, - ), - ) - step_progression = StepProgression.LOGGED_TRACE - # stream step # TODO: improve TTFT filter_user_messages = [m for m in persisted_messages if m.role != "user"] @@ -453,6 +439,7 @@ class LettaAgent(BaseAgent): ) except Exception as e: + caught_exception = e # Handle any unexpected errors during step processing self.logger.error(f"Error during step processing: {e}") job_update_metadata = {"error": str(e)} @@ -499,8 +486,8 @@ class LettaAgent(BaseAgent): await self.step_manager.update_step_error_async( actor=self.actor, step_id=step_id, # Use original step_id for telemetry - error_type=type(e).__name__ if "e" in locals() else "Unknown", - error_message=str(e) if "e" in locals() else "Unknown error", + error_type=type(caught_exception).__name__ if caught_exception is not None else "Unknown", + error_message=str(caught_exception) if caught_exception is not None else "Unknown error", error_traceback=traceback.format_exc(), stop_reason=stop_reason, ) @@ -646,6 +633,7 @@ class LettaAgent(BaseAgent): 
agent_step_span.set_attributes({"step_id": step_id}) step_progression = StepProgression.START + caught_exception = None should_continue = False step_metrics = StepMetrics(id=step_id) # Initialize metrics tracking @@ -664,6 +652,7 @@ class LettaAgent(BaseAgent): step_id=step_id, project_id=agent_state.project_id, status=StepStatus.PENDING, + model_handle=agent_state.llm_config.handle, ) # Only use step_id in messages if step was actually created effective_step_id = step_id if logged_step else None @@ -720,8 +709,12 @@ class LettaAgent(BaseAgent): ) ] elif response.choices[0].message.content: + # Carry thought_signature on TextContent when ReasoningContent doesn't exist to hold it reasoning = [ - TextContent(text=response.choices[0].message.content) + TextContent( + text=response.choices[0].message.content, + signature=response.choices[0].message.reasoning_content_signature, + ) ] # reasoning placed into content for legacy reasons elif response.choices[0].message.omitted_reasoning_content: reasoning = [OmittedReasoningContent()] @@ -762,24 +755,6 @@ class LettaAgent(BaseAgent): agent_step_span.add_event(name="step_ms", attributes={"duration_ms": ns_to_ms(step_ns)}) agent_step_span.end() - # Log LLM Trace - if settings.track_provider_trace: - await self.telemetry_manager.create_provider_trace_async( - actor=self.actor, - provider_trace=ProviderTrace( - request_json=request_data, - response_json=response_data, - step_id=step_id, - agent_id=self.agent_id, - agent_tags=agent_state.tags, - run_id=self.current_run_id, - org_id=self.actor.organization_id, - user_id=self.actor.id, - llm_config=self.agent_state.llm_config.model_dump() if self.agent_state.llm_config else None, - ), - ) - step_progression = StepProgression.LOGGED_TRACE - MetricRegistry().step_execution_time_ms_histogram.record(get_utc_timestamp_ns() - step_start, get_ctx_attributes()) step_progression = StepProgression.FINISHED @@ -795,6 +770,7 @@ class LettaAgent(BaseAgent): ) except Exception as e: + 
caught_exception = e # Handle any unexpected errors during step processing self.logger.error(f"Error during step processing: {e}") job_update_metadata = {"error": str(e)} @@ -837,8 +813,8 @@ class LettaAgent(BaseAgent): await self.step_manager.update_step_error_async( actor=self.actor, step_id=step_id, # Use original step_id for telemetry - error_type=type(e).__name__ if "e" in locals() else "Unknown", - error_message=str(e) if "e" in locals() else "Unknown error", + error_type=type(caught_exception).__name__ if caught_exception is not None else "Unknown", + error_message=str(caught_exception) if caught_exception is not None else "Unknown error", error_traceback=traceback.format_exc(), stop_reason=stop_reason, ) @@ -1000,6 +976,7 @@ class LettaAgent(BaseAgent): agent_step_span.set_attributes({"step_id": step_id}) step_progression = StepProgression.START + caught_exception = None should_continue = False step_metrics = StepMetrics(id=step_id) # Initialize metrics tracking @@ -1018,6 +995,7 @@ class LettaAgent(BaseAgent): step_id=step_id, project_id=agent_state.project_id, status=StepStatus.PENDING, + model_handle=agent_state.llm_config.handle, ) # Only use step_id in messages if step was actually created effective_step_id = step_id if logged_step else None @@ -1152,6 +1130,8 @@ class LettaAgent(BaseAgent): "output_tokens": interface.output_tokens, }, }, + llm_config=agent_state.llm_config, + latency_ms=int(llm_request_ms), ) persisted_messages, should_continue, stop_reason = await self._handle_ai_response( tool_call, @@ -1220,41 +1200,6 @@ class LettaAgent(BaseAgent): # TODO (cliandy): the stream POST request span has ended at this point, we should tie this to the stream # log_event("agent.stream.llm_response.processed") # [4^] - # Log LLM Trace - # We are piecing together the streamed response here. - # Content here does not match the actual response schema as streams come in chunks. 
- if settings.track_provider_trace: - await self.telemetry_manager.create_provider_trace_async( - actor=self.actor, - provider_trace=ProviderTrace( - request_json=request_data, - response_json={ - "content": { - "tool_call": tool_call.model_dump_json(), - "reasoning": [content.model_dump_json() for content in reasoning_content], - }, - "id": interface.message_id, - "model": interface.model, - "role": "assistant", - # "stop_reason": "", - # "stop_sequence": None, - "type": "message", - "usage": { - "input_tokens": usage.prompt_tokens, - "output_tokens": usage.completion_tokens, - }, - }, - step_id=step_id, - agent_id=self.agent_id, - agent_tags=agent_state.tags, - run_id=self.current_run_id, - org_id=self.actor.organization_id, - user_id=self.actor.id, - llm_config=self.agent_state.llm_config.model_dump() if self.agent_state.llm_config else None, - ), - ) - step_progression = StepProgression.LOGGED_TRACE - if persisted_messages[-1].role != "approval": # yields tool response as this is handled from Letta and not the response from the LLM provider tool_return = [msg for msg in persisted_messages if msg.role == "tool"][-1].to_letta_messages()[0] @@ -1287,6 +1232,7 @@ class LettaAgent(BaseAgent): self.logger.warning(f"Failed to record step metrics: {metrics_error}") except Exception as e: + caught_exception = e # Handle any unexpected errors during step processing self.logger.error(f"Error during step processing: {e}") job_update_metadata = {"error": str(e)} @@ -1333,8 +1279,8 @@ class LettaAgent(BaseAgent): await self.step_manager.update_step_error_async( actor=self.actor, step_id=step_id, # Use original step_id for telemetry - error_type=type(e).__name__ if "e" in locals() else "Unknown", - error_message=str(e) if "e" in locals() else "Unknown error", + error_type=type(caught_exception).__name__ if caught_exception is not None else "Unknown", + error_message=str(caught_exception) if caught_exception is not None else "Unknown error", 
error_traceback=traceback.format_exc(), stop_reason=stop_reason, ) @@ -1481,7 +1427,7 @@ class LettaAgent(BaseAgent): agent_tags=agent_state.tags, run_id=self.current_run_id, step_id=step_metrics.id, - call_type="agent_step", + call_type=LLMCallType.agent_step, ) response = await llm_client.request_async_with_telemetry(request_data, agent_state.llm_config) @@ -1554,13 +1500,13 @@ class LettaAgent(BaseAgent): agent_tags=agent_state.tags, run_id=self.current_run_id, step_id=step_id, - call_type="agent_step", + call_type=LLMCallType.agent_step, ) # Attempt LLM request with telemetry wrapper return ( request_data, - await llm_client.stream_async_with_telemetry(request_data, agent_state.llm_config), + await llm_client.stream_async(request_data, agent_state.llm_config), current_in_context_messages, new_in_context_messages, valid_tool_names, @@ -1605,8 +1551,10 @@ class LettaAgent(BaseAgent): run_id=run_id, step_id=step_id, ) + elif isinstance(e, LLMError): + raise else: - raise llm_client.handle_llm_error(e) + raise llm_client.handle_llm_error(e, llm_config=llm_config) @trace_method async def _rebuild_context_window( @@ -1626,7 +1574,7 @@ class LettaAgent(BaseAgent): self.logger.warning( f"Total tokens {total_tokens} exceeds configured max tokens {llm_config.context_window}, forcefully clearing message history." ) - new_in_context_messages, updated = await self.summarizer.summarize( + new_in_context_messages, _updated = await self.summarizer.summarize( in_context_messages=in_context_messages, new_letta_messages=new_letta_messages, force=True, @@ -1639,7 +1587,7 @@ class LettaAgent(BaseAgent): self.logger.info( f"Total tokens {total_tokens} does not exceed configured max tokens {llm_config.context_window}, passing summarizing w/o force." 
) - new_in_context_messages, updated = await self.summarizer.summarize( + new_in_context_messages, _updated = await self.summarizer.summarize( in_context_messages=in_context_messages, new_letta_messages=new_letta_messages, run_id=run_id, @@ -1659,7 +1607,7 @@ class LettaAgent(BaseAgent): agent_state = await self.agent_manager.get_agent_by_id_async(agent_id=self.agent_id, actor=self.actor) message_ids = agent_state.message_ids in_context_messages = await self.message_manager.get_messages_by_ids_async(message_ids=message_ids, actor=self.actor) - new_in_context_messages, updated = await self.summarizer.summarize( + new_in_context_messages, _updated = await self.summarizer.summarize( in_context_messages=in_context_messages, new_letta_messages=[], force=True ) return await self.agent_manager.update_message_ids_async( diff --git a/letta/agents/letta_agent_batch.py b/letta/agents/letta_agent_batch.py index 7bcc74f0..35d8c8f6 100644 --- a/letta/agents/letta_agent_batch.py +++ b/letta/agents/letta_agent_batch.py @@ -217,7 +217,7 @@ class LettaAgentBatch(BaseAgent): if batch_items: log_event(name="bulk_create_batch_items") - batch_items_persisted = await self.batch_manager.create_llm_batch_items_bulk_async(batch_items, actor=self.actor) + await self.batch_manager.create_llm_batch_items_bulk_async(batch_items, actor=self.actor) log_event(name="return_batch_response") return LettaBatchResponse( diff --git a/letta/agents/letta_agent_v2.py b/letta/agents/letta_agent_v2.py index 58379c78..686d49fb 100644 --- a/letta/agents/letta_agent_v2.py +++ b/letta/agents/letta_agent_v2.py @@ -9,7 +9,6 @@ from letta.adapters.letta_llm_adapter import LettaLLMAdapter from letta.adapters.letta_llm_request_adapter import LettaLLMRequestAdapter from letta.adapters.letta_llm_stream_adapter import LettaLLMStreamAdapter from letta.agents.base_agent_v2 import BaseAgentV2 -from letta.agents.ephemeral_summary_agent import EphemeralSummaryAgent from letta.agents.helpers import ( 
_build_rule_violation_result, _load_last_function_response, @@ -20,7 +19,7 @@ from letta.agents.helpers import ( generate_step_id, ) from letta.constants import DEFAULT_MAX_STEPS, NON_USER_MSG_PREFIX, REQUEST_HEARTBEAT_PARAM -from letta.errors import ContextWindowExceededError, LLMError +from letta.errors import ContextWindowExceededError, InsufficientCreditsError, LLMError from letta.helpers import ToolRulesSolver from letta.helpers.datetime_helpers import get_utc_time, get_utc_timestamp_ns, ns_to_ms from letta.helpers.reasoning_helper import scrub_inner_thoughts_from_messages @@ -31,7 +30,7 @@ from letta.log import get_logger from letta.otel.tracing import log_event, trace_method, tracer from letta.prompts.prompt_generator import PromptGenerator from letta.schemas.agent import AgentState, UpdateAgent -from letta.schemas.enums import AgentType, MessageStreamStatus, RunStatus, StepStatus +from letta.schemas.enums import AgentType, LLMCallType, MessageStreamStatus, RunStatus, StepStatus from letta.schemas.letta_message import LettaMessage, MessageType from letta.schemas.letta_message_content import OmittedReasoningContent, ReasoningContent, RedactedReasoningContent, TextContent from letta.schemas.letta_request import ClientToolSchema @@ -58,6 +57,7 @@ from letta.server.rest_api.utils import ( from letta.services.agent_manager import AgentManager from letta.services.archive_manager import ArchiveManager from letta.services.block_manager import BlockManager +from letta.services.credit_verification_service import CreditVerificationService from letta.services.helpers.tool_parser_helper import runtime_override_tool_json_schema from letta.services.message_manager import MessageManager from letta.services.passage_manager import PassageManager @@ -67,10 +67,10 @@ from letta.services.summarizer.enums import SummarizationMode from letta.services.summarizer.summarizer import Summarizer from letta.services.telemetry_manager import TelemetryManager from 
letta.services.tool_executor.tool_execution_manager import ToolExecutionManager -from letta.settings import model_settings, settings, summarizer_settings +from letta.settings import settings, summarizer_settings from letta.system import package_function_response from letta.types import JsonDict -from letta.utils import log_telemetry, safe_create_task, united_diff, validate_function_response +from letta.utils import log_telemetry, safe_create_task, safe_create_task_with_return, united_diff, validate_function_response class LettaAgentV2(BaseAgentV2): @@ -106,6 +106,7 @@ class LettaAgentV2(BaseAgentV2): self.passage_manager = PassageManager() self.step_manager = StepManager() self.telemetry_manager = TelemetryManager() + self.credit_verification_service = CreditVerificationService() ## TODO: Expand to more # if summarizer_settings.enable_summarization and model_settings.openai_api_key: @@ -158,6 +159,8 @@ class LettaAgentV2(BaseAgentV2): llm_adapter=LettaLLMRequestAdapter( llm_client=self.llm_client, llm_config=self.agent_state.llm_config, + call_type=LLMCallType.agent_step, + agent_id=self.agent_state.id, agent_tags=self.agent_state.tags, org_id=self.actor.organization_id, user_id=self.actor.id, @@ -181,6 +184,7 @@ class LettaAgentV2(BaseAgentV2): include_return_message_types: list[MessageType] | None = None, request_start_timestamp_ns: int | None = None, client_tools: list[ClientToolSchema] | None = None, + include_compaction_messages: bool = False, # Not used in V2, but accepted for API compatibility ) -> LettaResponse: """ Execute the agent loop in blocking mode, returning all messages at once. @@ -193,6 +197,7 @@ class LettaAgentV2(BaseAgentV2): include_return_message_types: Filter for which message types to return request_start_timestamp_ns: Start time for tracking request duration client_tools: Optional list of client-side tools (not used in V2, for API compatibility) + include_compaction_messages: Not used in V2, but accepted for API compatibility. 
Returns: LettaResponse: Complete response with all messages and metadata @@ -205,15 +210,25 @@ class LettaAgentV2(BaseAgentV2): ) in_context_messages = in_context_messages + input_messages_to_persist response_letta_messages = [] + credit_task = None for i in range(max_steps): remaining_turns = max_steps - i - 1 + # Await credit check from previous iteration before running next step + if credit_task is not None: + if not await credit_task: + self.should_continue = False + self.stop_reason = LettaStopReason(stop_reason=StopReasonType.insufficient_credits) + break + credit_task = None + response = self._step( messages=in_context_messages + self.response_messages, input_messages_to_persist=input_messages_to_persist, llm_adapter=LettaLLMRequestAdapter( llm_client=self.llm_client, llm_config=self.agent_state.llm_config, + call_type=LLMCallType.agent_step, agent_id=self.agent_state.id, agent_tags=self.agent_state.tags, run_id=run_id, @@ -233,6 +248,9 @@ class LettaAgentV2(BaseAgentV2): if not self.should_continue: break + # Fire credit check to run in parallel with loop overhead / next step setup + credit_task = safe_create_task_with_return(self._check_credits()) + input_messages_to_persist = [] # Rebuild context window after stepping @@ -271,6 +289,7 @@ class LettaAgentV2(BaseAgentV2): request_start_timestamp_ns: int | None = None, conversation_id: str | None = None, # Not used in V2, but accepted for API compatibility client_tools: list[ClientToolSchema] | None = None, + include_compaction_messages: bool = False, # Not used in V2, but accepted for API compatibility ) -> AsyncGenerator[str, None]: """ Execute the agent loop in streaming mode, yielding chunks as they become available. 
@@ -289,6 +308,7 @@ class LettaAgentV2(BaseAgentV2): include_return_message_types: Filter for which message types to return request_start_timestamp_ns: Start time for tracking request duration client_tools: Optional list of client-side tools (not used in V2, for API compatibility) + include_compaction_messages: Not used in V2, but accepted for API compatibility. Yields: str: JSON-formatted SSE data chunks for each completed step @@ -301,6 +321,7 @@ class LettaAgentV2(BaseAgentV2): llm_adapter = LettaLLMStreamAdapter( llm_client=self.llm_client, llm_config=self.agent_state.llm_config, + call_type=LLMCallType.agent_step, agent_id=self.agent_state.id, agent_tags=self.agent_state.tags, run_id=run_id, @@ -311,6 +332,7 @@ class LettaAgentV2(BaseAgentV2): llm_adapter = LettaLLMRequestAdapter( llm_client=self.llm_client, llm_config=self.agent_state.llm_config, + call_type=LLMCallType.agent_step, agent_id=self.agent_state.id, agent_tags=self.agent_state.tags, run_id=run_id, @@ -323,7 +345,16 @@ class LettaAgentV2(BaseAgentV2): input_messages, self.agent_state, self.message_manager, self.actor, run_id ) in_context_messages = in_context_messages + input_messages_to_persist + credit_task = None for i in range(max_steps): + # Await credit check from previous iteration before running next step + if credit_task is not None: + if not await credit_task: + self.should_continue = False + self.stop_reason = LettaStopReason(stop_reason=StopReasonType.insufficient_credits) + break + credit_task = None + response = self._step( messages=in_context_messages + self.response_messages, input_messages_to_persist=input_messages_to_persist, @@ -342,6 +373,9 @@ class LettaAgentV2(BaseAgentV2): if not self.should_continue: break + # Fire credit check to run in parallel with loop overhead / next step setup + credit_task = safe_create_task_with_return(self._check_credits()) + input_messages_to_persist = [] if self.stop_reason is None: @@ -420,8 +454,9 @@ class LettaAgentV2(BaseAgentV2): raise 
AssertionError("run_id is required when enforce_run_id_set is True") step_progression = StepProgression.START + caught_exception = None # TODO(@caren): clean this up - tool_call, reasoning_content, agent_step_span, first_chunk, step_id, logged_step, step_start_ns, step_metrics = ( + tool_call, reasoning_content, agent_step_span, first_chunk, step_id, logged_step, _step_start_ns, step_metrics = ( None, None, None, @@ -580,6 +615,7 @@ class LettaAgentV2(BaseAgentV2): ) step_progression, step_metrics = await self._step_checkpoint_finish(step_metrics, agent_step_span, logged_step) except Exception as e: + caught_exception = e self.logger.warning(f"Error during step processing: {e}") self.job_update_metadata = {"error": str(e)} @@ -615,8 +651,8 @@ class LettaAgentV2(BaseAgentV2): await self.step_manager.update_step_error_async( actor=self.actor, step_id=step_id, # Use original step_id for telemetry - error_type=type(e).__name__ if "e" in locals() else "Unknown", - error_message=str(e) if "e" in locals() else "Unknown error", + error_type=type(caught_exception).__name__ if caught_exception is not None else "Unknown", + error_message=str(caught_exception) if caught_exception is not None else "Unknown error", error_traceback=traceback.format_exc(), stop_reason=self.stop_reason, ) @@ -667,6 +703,17 @@ class LettaAgentV2(BaseAgentV2): self.last_function_response = None self.response_messages = [] + async def _check_credits(self) -> bool: + """Check if the organization still has credits. 
Returns True if OK or not configured.""" + try: + await self.credit_verification_service.verify_credits(self.actor.organization_id, self.agent_state.id) + return True + except InsufficientCreditsError: + self.logger.warning( + f"Insufficient credits for organization {self.actor.organization_id}, agent {self.agent_state.id}, stopping agent loop" + ) + return False + @trace_method async def _check_run_cancellation(self, run_id) -> bool: try: @@ -678,20 +725,37 @@ class LettaAgentV2(BaseAgentV2): return False @trace_method - async def _refresh_messages(self, in_context_messages: list[Message]): - num_messages = await self.message_manager.size_async( - agent_id=self.agent_state.id, - actor=self.actor, - ) - num_archival_memories = await self.passage_manager.agent_passage_size_async( - agent_id=self.agent_state.id, - actor=self.actor, - ) - in_context_messages = await self._rebuild_memory( - in_context_messages, - num_messages=num_messages, - num_archival_memories=num_archival_memories, - ) + async def _refresh_messages(self, in_context_messages: list[Message], force_system_prompt_refresh: bool = False): + """Refresh in-context messages. + + This performs two tasks: + 1) Rebuild the *system prompt* only if the memory/tool-rules/directories section has changed. + This avoids rebuilding the system prompt on every step due to dynamic metadata (e.g. message counts), + which can bust prefix caching. + 2) Scrub inner thoughts from messages. + + Args: + in_context_messages: Current in-context messages + force_system_prompt_refresh: If True, forces evaluation of whether the system prompt needs to be rebuilt. + (The rebuild will still be skipped if memory/tool-rules/directories haven't changed.) + + Returns: + Refreshed in-context messages. + """ + # Only rebuild when explicitly forced (e.g., after compaction). + # Normal turns should not trigger system prompt recompilation. 
+ if force_system_prompt_refresh: + try: + in_context_messages = await self._rebuild_memory( + in_context_messages, + num_messages=None, + num_archival_memories=None, + force=True, + ) + except Exception: + raise + + # Always scrub inner thoughts regardless of system prompt refresh in_context_messages = scrub_inner_thoughts_from_messages(in_context_messages, self.agent_state.llm_config) return in_context_messages @@ -699,8 +763,9 @@ class LettaAgentV2(BaseAgentV2): async def _rebuild_memory( self, in_context_messages: list[Message], - num_messages: int, - num_archival_memories: int, + num_messages: int | None, + num_archival_memories: int | None, + force: bool = False, ): agent_state = await self.agent_manager.refresh_memory_async(agent_state=self.agent_state, actor=self.actor) @@ -721,49 +786,26 @@ class LettaAgentV2(BaseAgentV2): else: archive_tags = None - # TODO: This is a pretty brittle pattern established all over our code, need to get rid of this curr_system_message = in_context_messages[0] curr_system_message_text = curr_system_message.content[0].text - # Extract the memory section that includes , tool rules, and directories. - # This avoids timestamp comparison issues in , which is dynamic. 
- def extract_memory_section(text: str) -> str: - # Primary pattern: everything from up to - mem_start = text.find("") - meta_start = text.find("") - if mem_start != -1: - if meta_start != -1 and meta_start > mem_start: - return text[mem_start:meta_start] - return text[mem_start:] - - # Fallback pattern used in some legacy prompts: between and - base_end = text.find("") - if base_end != -1: - if meta_start != -1 and meta_start > base_end: - return text[base_end + len("") : meta_start] - return text[base_end + len("") :] - - # Last resort: return full text - return text - - curr_memory_section = extract_memory_section(curr_system_message_text) - # refresh files agent_state = await self.agent_manager.refresh_file_blocks(agent_state=agent_state, actor=self.actor) - # generate just the memory string with current state for comparison + # generate memory string with current state curr_memory_str = agent_state.memory.compile( tool_usage_rules=tool_constraint_block, sources=agent_state.sources, max_files_open=agent_state.max_files_open, llm_config=agent_state.llm_config, ) - new_memory_section = extract_memory_section(curr_memory_str) - # compare just the memory sections (memory blocks, tool rules, directories) - if curr_memory_section.strip() == new_memory_section.strip(): + # Skip rebuild unless explicitly forced and unless system/memory content actually changed. 
+ system_prompt_changed = agent_state.system not in curr_system_message_text + memory_changed = curr_memory_str not in curr_system_message_text + if (not force) and (not system_prompt_changed) and (not memory_changed): self.logger.debug( - f"Memory and sources haven't changed for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name}), skipping system prompt rebuild" + f"Memory, sources, and system prompt haven't changed for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name}), skipping system prompt rebuild" ) return in_context_messages @@ -793,7 +835,7 @@ class LettaAgentV2(BaseAgentV2): new_system_message = await self.message_manager.update_message_by_id_async( curr_system_message.id, message_update=MessageUpdate(content=new_system_message_str), actor=self.actor ) - return [new_system_message] + in_context_messages[1:] + return [new_system_message, *in_context_messages[1:]] else: return in_context_messages @@ -864,6 +906,7 @@ class LettaAgentV2(BaseAgentV2): step_id=step_id, project_id=self.agent_state.project_id, status=StepStatus.PENDING, + model_handle=self.agent_state.llm_config.handle, ) # Also create step metrics early and update at the end of the step @@ -1279,7 +1322,7 @@ class LettaAgentV2(BaseAgentV2): self.logger.warning( f"Total tokens {total_tokens} exceeds configured max tokens {self.agent_state.llm_config.context_window}, forcefully clearing message history." ) - new_in_context_messages, updated = await self.summarizer.summarize( + new_in_context_messages, _updated = await self.summarizer.summarize( in_context_messages=in_context_messages, new_letta_messages=new_letta_messages, force=True, @@ -1292,7 +1335,7 @@ class LettaAgentV2(BaseAgentV2): self.logger.info( f"Total tokens {total_tokens} does not exceed configured max tokens {self.agent_state.llm_config.context_window}, passing summarizing w/o force." 
) - new_in_context_messages, updated = await self.summarizer.summarize( + new_in_context_messages, _updated = await self.summarizer.summarize( in_context_messages=in_context_messages, new_letta_messages=new_letta_messages, run_id=run_id, diff --git a/letta/agents/letta_agent_v3.py b/letta/agents/letta_agent_v3.py index 0a4ea5c2..c53910b0 100644 --- a/letta/agents/letta_agent_v3.py +++ b/letta/agents/letta_agent_v3.py @@ -1,11 +1,12 @@ import asyncio import json import uuid -from typing import Any, AsyncGenerator, Dict, Literal, Optional +from typing import Any, AsyncGenerator, Dict, Optional from opentelemetry.trace import Span from letta.adapters.letta_llm_adapter import LettaLLMAdapter +from letta.adapters.sglang_native_adapter import SGLangNativeAdapter from letta.adapters.simple_llm_request_adapter import SimpleLLMRequestAdapter from letta.adapters.simple_llm_stream_adapter import SimpleLLMStreamAdapter from letta.agents.helpers import ( @@ -19,28 +20,34 @@ from letta.agents.helpers import ( merge_and_validate_prefilled_args, ) from letta.agents.letta_agent_v2 import LettaAgentV2 -from letta.constants import DEFAULT_MAX_STEPS, NON_USER_MSG_PREFIX, REQUEST_HEARTBEAT_PARAM, SUMMARIZATION_TRIGGER_MULTIPLIER +from letta.constants import DEFAULT_MAX_STEPS, NON_USER_MSG_PREFIX, REQUEST_HEARTBEAT_PARAM from letta.errors import ContextWindowExceededError, LLMError, SystemPromptTokenExceededError from letta.helpers import ToolRulesSolver from letta.helpers.datetime_helpers import get_utc_time, get_utc_timestamp_ns -from letta.helpers.message_helper import convert_message_creates_to_messages from letta.helpers.tool_execution_helper import enable_strict_mode from letta.local_llm.constants import INNER_THOUGHTS_KWARG from letta.otel.tracing import trace_method from letta.schemas.agent import AgentState -from letta.schemas.enums import MessageRole -from letta.schemas.letta_message import ApprovalReturn, LettaErrorMessage, LettaMessage, MessageType +from letta.schemas.enums 
import LLMCallType +from letta.schemas.letta_message import ( + ApprovalReturn, + CompactionStats, + EventMessage, + LettaErrorMessage, + LettaMessage, + MessageType, + SummaryMessage, + extract_compaction_stats_from_packed_json, +) from letta.schemas.letta_message_content import OmittedReasoningContent, ReasoningContent, RedactedReasoningContent, TextContent from letta.schemas.letta_request import ClientToolSchema -from letta.schemas.letta_response import LettaResponse +from letta.schemas.letta_response import LettaResponse, TurnTokenData from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message, MessageCreate, ToolReturn -from letta.schemas.openai.chat_completion_response import FunctionCall, ToolCall, ToolCallDenial, UsageStatistics +from letta.schemas.openai.chat_completion_response import ChoiceLogprobs, ToolCall, ToolCallDenial, UsageStatistics from letta.schemas.step import StepProgression from letta.schemas.step_metrics import StepMetrics from letta.schemas.tool_execution_result import ToolExecutionResult -from letta.schemas.usage import LettaUsageStatistics from letta.schemas.user import User from letta.server.rest_api.utils import ( create_approval_request_message_from_llm_response, @@ -50,15 +57,31 @@ from letta.server.rest_api.utils import ( ) from letta.services.conversation_manager import ConversationManager from letta.services.helpers.tool_parser_helper import runtime_override_tool_json_schema -from letta.services.summarizer.summarizer_all import summarize_all +from letta.services.summarizer.compact import compact_messages from letta.services.summarizer.summarizer_config import CompactionSettings -from letta.services.summarizer.summarizer_sliding_window import ( - count_tokens, - summarize_via_sliding_window, -) +from letta.services.summarizer.summarizer_sliding_window import count_tokens from letta.settings import settings, summarizer_settings 
-from letta.system import package_function_response, package_summarize_message_no_counts -from letta.utils import log_telemetry, validate_function_response +from letta.system import package_function_response +from letta.utils import safe_create_task_with_return, validate_function_response + + +def extract_compaction_stats_from_message(message: Message) -> CompactionStats | None: + """ + Extract CompactionStats from a Message object's packed content. + + Args: + message: Message object with packed JSON content + + Returns: + CompactionStats if found and valid, None otherwise + """ + try: + if message.content and len(message.content) == 1: + text_content = message.content[0].text + return extract_compaction_stats_from_packed_json(text_content) + except AttributeError: + pass + return None class LettaAgentV3(LettaAgentV2): @@ -96,6 +119,11 @@ class LettaAgentV3(LettaAgentV2): self.conversation_id: str | None = None # Client-side tools passed in the request (executed by client, not server) self.client_tools: list[ClientToolSchema] = [] + # Log probabilities from the most recent LLM call (for RL training) + self.logprobs: ChoiceLogprobs | None = None + # Multi-turn token tracking for RL training (accumulated across all LLM calls) + self.turns: list[TurnTokenData] = [] + self.return_token_ids: bool = False def _compute_tool_return_truncation_chars(self) -> int: """Compute a dynamic cap for tool returns in requests. @@ -120,6 +148,7 @@ class LettaAgentV3(LettaAgentV2): request_start_timestamp_ns: int | None = None, conversation_id: str | None = None, client_tools: list[ClientToolSchema] | None = None, + include_compaction_messages: bool = False, ) -> LettaResponse: """ Execute the agent loop in blocking mode, returning all messages at once. @@ -134,6 +163,8 @@ class LettaAgentV3(LettaAgentV2): conversation_id: Optional conversation ID for conversation-scoped messaging client_tools: Optional list of client-side tools. 
When called, execution pauses for client to provide tool returns. + include_compaction_messages: Whether to include SummaryMessage/EventMessage in response + and use role=summary for stored summary messages. Returns: LettaResponse: Complete response with all messages and metadata @@ -168,29 +199,65 @@ class LettaAgentV3(LettaAgentV2): input_messages_to_persist = [input_messages_to_persist[0]] self.in_context_messages = curr_in_context_messages + + # Check if we should use SGLang native adapter for multi-turn RL training + use_sglang_native = ( + self.agent_state.llm_config.return_token_ids + and self.agent_state.llm_config.handle + and self.agent_state.llm_config.handle.startswith("sglang/") + ) + self.return_token_ids = use_sglang_native + + if use_sglang_native: + # Use SGLang native adapter for multi-turn RL training + llm_adapter = SGLangNativeAdapter( + llm_client=self.llm_client, + llm_config=self.agent_state.llm_config, + call_type=LLMCallType.agent_step, + agent_id=self.agent_state.id, + agent_tags=self.agent_state.tags, + run_id=run_id, + org_id=self.actor.organization_id, + user_id=self.actor.id, + ) + # Reset turns tracking for this step + self.turns = [] + else: + llm_adapter = SimpleLLMRequestAdapter( + llm_client=self.llm_client, + llm_config=self.agent_state.llm_config, + call_type=LLMCallType.agent_step, + agent_id=self.agent_state.id, + agent_tags=self.agent_state.tags, + run_id=run_id, + org_id=self.actor.organization_id, + user_id=self.actor.id, + ) + + credit_task = None for i in range(max_steps): if i == 1 and follow_up_messages: input_messages_to_persist = follow_up_messages follow_up_messages = [] + # Await credit check from previous iteration before running next step + if credit_task is not None: + if not await credit_task: + self.should_continue = False + self.stop_reason = LettaStopReason(stop_reason=StopReasonType.insufficient_credits) + break + credit_task = None + response = self._step( # we append input_messages_to_persist since they 
aren't checkpointed as in-context until the end of the step (may be rolled back) messages=list(self.in_context_messages + input_messages_to_persist), input_messages_to_persist=input_messages_to_persist, - # TODO need to support non-streaming adapter too - llm_adapter=SimpleLLMRequestAdapter( - llm_client=self.llm_client, - llm_config=self.agent_state.llm_config, - agent_id=self.agent_state.id, - agent_tags=self.agent_state.tags, - run_id=run_id, - org_id=self.actor.organization_id, - user_id=self.actor.id, - ), + llm_adapter=llm_adapter, run_id=run_id, # use_assistant_message=use_assistant_message, include_return_message_types=include_return_message_types, request_start_timestamp_ns=request_start_timestamp_ns, + include_compaction_messages=include_compaction_messages, ) input_messages_to_persist = [] # clear after first step @@ -228,6 +295,9 @@ class LettaAgentV3(LettaAgentV2): if not self.should_continue: break + # Fire credit check to run in parallel with loop overhead / next step setup + credit_task = safe_create_task_with_return(self._check_credits()) + # input_messages_to_persist = [] if i == max_steps - 1 and self.stop_reason is None: @@ -260,7 +330,15 @@ class LettaAgentV3(LettaAgentV2): ) if include_return_message_types: response_letta_messages = [m for m in response_letta_messages if m.message_type in include_return_message_types] - result = LettaResponse(messages=response_letta_messages, stop_reason=self.stop_reason, usage=self.usage) + # Set context_tokens to expose actual context window usage (vs accumulated prompt_tokens) + self.usage.context_tokens = self.context_token_estimate + result = LettaResponse( + messages=response_letta_messages, + stop_reason=self.stop_reason, + usage=self.usage, + logprobs=self.logprobs, + turns=self.turns if self.return_token_ids and self.turns else None, + ) if run_id: if self.job_update_metadata is None: self.job_update_metadata = {} @@ -283,6 +361,7 @@ class LettaAgentV3(LettaAgentV2): request_start_timestamp_ns: int | 
None = None, conversation_id: str | None = None, client_tools: list[ClientToolSchema] | None = None, + include_compaction_messages: bool = False, ) -> AsyncGenerator[str, None]: """ Execute the agent loop in streaming mode, yielding chunks as they become available. @@ -322,20 +401,44 @@ class LettaAgentV3(LettaAgentV2): actor=self.actor, ) + # Check if we should use SGLang native adapter for multi-turn RL training + use_sglang_native = ( + self.agent_state.llm_config.return_token_ids + and self.agent_state.llm_config.handle + and self.agent_state.llm_config.handle.startswith("sglang/") + ) + self.return_token_ids = use_sglang_native + if stream_tokens: llm_adapter = SimpleLLMStreamAdapter( llm_client=self.llm_client, llm_config=self.agent_state.llm_config, + call_type=LLMCallType.agent_step, agent_id=self.agent_state.id, agent_tags=self.agent_state.tags, run_id=run_id, org_id=self.actor.organization_id, user_id=self.actor.id, ) + elif use_sglang_native: + # Use SGLang native adapter for multi-turn RL training + llm_adapter = SGLangNativeAdapter( + llm_client=self.llm_client, + llm_config=self.agent_state.llm_config, + call_type=LLMCallType.agent_step, + agent_id=self.agent_state.id, + agent_tags=self.agent_state.tags, + run_id=run_id, + org_id=self.actor.organization_id, + user_id=self.actor.id, + ) + # Reset turns tracking for this step + self.turns = [] else: llm_adapter = SimpleLLMRequestAdapter( llm_client=self.llm_client, llm_config=self.agent_state.llm_config, + call_type=LLMCallType.agent_step, agent_id=self.agent_state.id, agent_tags=self.agent_state.tags, run_id=run_id, @@ -359,10 +462,20 @@ class LettaAgentV3(LettaAgentV2): input_messages_to_persist = [input_messages_to_persist[0]] self.in_context_messages = in_context_messages + credit_task = None for i in range(max_steps): if i == 1 and follow_up_messages: input_messages_to_persist = follow_up_messages follow_up_messages = [] + + # Await credit check from previous iteration before running next step + if 
credit_task is not None: + if not await credit_task: + self.should_continue = False + self.stop_reason = LettaStopReason(stop_reason=StopReasonType.insufficient_credits) + break + credit_task = None + response = self._step( # we append input_messages_to_persist since they aren't checkpointed as in-context until the end of the step (may be rolled back) messages=list(self.in_context_messages + input_messages_to_persist), @@ -372,12 +485,26 @@ class LettaAgentV3(LettaAgentV2): # use_assistant_message=use_assistant_message, include_return_message_types=include_return_message_types, request_start_timestamp_ns=request_start_timestamp_ns, + include_compaction_messages=include_compaction_messages, ) input_messages_to_persist = [] # clear after first step async for chunk in response: response_letta_messages.append(chunk) if first_chunk: request_span = self._request_checkpoint_ttft(request_span, request_start_timestamp_ns) + + # Log chunks with missing id or otid for debugging. + # Compaction EventMessage is intentionally metadata-only and may omit otid. 
+ is_compaction_event = isinstance(chunk, EventMessage) and chunk.event_type == "compaction" + if isinstance(chunk, LettaMessage) and (not chunk.id or not chunk.otid) and not is_compaction_event: + self.logger.warning( + "Streaming chunk missing id or otid: message_type=%s id=%s otid=%s step_id=%s", + chunk.message_type, + chunk.id, + chunk.otid, + chunk.step_id, + ) + yield f"data: {chunk.model_dump_json()}\n\n" first_chunk = False @@ -391,6 +518,9 @@ class LettaAgentV3(LettaAgentV2): if not self.should_continue: break + # Fire credit check to run in parallel with loop overhead / next step setup + credit_task = safe_create_task_with_return(self._check_credits()) + if i == max_steps - 1 and self.stop_reason is None: self.stop_reason = LettaStopReason(stop_reason=StopReasonType.max_steps.value) @@ -446,10 +576,19 @@ class LettaAgentV3(LettaAgentV2): # Cleanup and finalize (only runs if no exception occurred) try: + # Set context_tokens to expose actual context window usage (vs accumulated prompt_tokens) + self.usage.context_tokens = self.context_token_estimate + if run_id: # Filter out LettaStopReason from messages (only valid in LettaStreamingResponse, not LettaResponse) filtered_messages = [m for m in response_letta_messages if not isinstance(m, LettaStopReason)] - result = LettaResponse(messages=filtered_messages, stop_reason=self.stop_reason, usage=self.usage) + result = LettaResponse( + messages=filtered_messages, + stop_reason=self.stop_reason, + usage=self.usage, + logprobs=self.logprobs, + turns=self.turns if self.return_token_ids and self.turns else None, + ) if self.job_update_metadata is None: self.job_update_metadata = {} self.job_update_metadata["result"] = result.model_dump(mode="json") @@ -518,7 +657,7 @@ class LettaAgentV3(LettaAgentV2): message.conversation_id = self.conversation_id # persist the new message objects - ONLY place where messages are persisted - persisted_messages = await self.message_manager.create_many_messages_async( + await 
self.message_manager.create_many_messages_async( new_messages, actor=self.actor, run_id=run_id, @@ -556,6 +695,77 @@ class LettaAgentV3(LettaAgentV2): self.in_context_messages = in_context_messages # update in-memory state + def _create_compaction_event_message( + self, + step_id: str | None, + run_id: str | None, + trigger: str, + ) -> EventMessage: + """ + Create an EventMessage to notify the client that compaction is starting. + + Args: + step_id: The current step ID + run_id: The current run ID + trigger: The trigger that caused compaction (e.g., "context_window_exceeded", "post_step_context_check") + + Returns: + EventMessage to yield before compaction starts + """ + return EventMessage( + id=str(uuid.uuid4()), + date=get_utc_time(), + event_type="compaction", + event_data={ + "trigger": trigger, + "context_token_estimate": self.context_token_estimate, + "context_window": self.agent_state.llm_config.context_window, + }, + run_id=run_id, + step_id=step_id, + ) + + def _create_summary_result_message( + self, + summary_message: Message, + summary_text: str, + step_id: str | None, + run_id: str | None, + include_compaction_messages: bool, + ) -> list[LettaMessage]: + """ + Create the summary message to yield to the client after compaction completes. 
+ + Args: + summary_message: The persisted summary Message object + summary_text: The raw summary text (unpacked) + step_id: The current step ID + run_id: The current run ID + include_compaction_messages: If True, return SummaryMessage; if False, return UserMessage + + Returns: + List of LettaMessage objects to yield to the client + """ + if include_compaction_messages: + # Extract compaction_stats from the packed message content if available + compaction_stats = extract_compaction_stats_from_message(summary_message) + + # New behavior: structured SummaryMessage + return [ + SummaryMessage( + id=summary_message.id, + date=summary_message.created_at, + summary=summary_text, + otid=Message.generate_otid_from_id(summary_message.id, 0), + step_id=step_id, + run_id=run_id, + compaction_stats=compaction_stats, + ), + ] + else: + # Old behavior: UserMessage with packed JSON + return list(Message.to_letta_messages(summary_message)) + @trace_method async def _step( self, @@ -569,6 +779,7 @@ class LettaAgentV3(LettaAgentV2): remaining_turns: int = -1, dry_run: bool = False, enforce_run_id_set: bool = True, + include_compaction_messages: bool = False, ) -> AsyncGenerator[LettaMessage | dict, None]: """ Execute a single agent step (one LLM call and tool execution). 
@@ -599,8 +810,9 @@ class LettaAgentV3(LettaAgentV2): self.logger.warning("Context token estimate is not set") step_progression = StepProgression.START + caught_exception = None # TODO(@caren): clean this up - tool_calls, content, agent_step_span, first_chunk, step_id, logged_step, step_start_ns, step_metrics = ( + tool_calls, content, agent_step_span, _first_chunk, step_id, logged_step, _step_start_ns, step_metrics = ( None, None, None, @@ -622,13 +834,11 @@ class LettaAgentV3(LettaAgentV2): self.logger.info("switching to unconstrained mode (allowing non-tool responses)") self._require_tool_call = require_tool_call - # Always refresh messages at the start of each step to pick up external inputs - # (e.g., approval responses submitted by the client while this stream is running) + # Refresh messages at the start of each step to scrub inner thoughts. + # NOTE: We skip system prompt refresh during normal steps to preserve prefix caching. + # The system prompt is only rebuilt after compaction or message reset. 
try: - # TODO: cleanup and de-dup - # updates the system prompt with the latest blocks / message histories - messages = await self._refresh_messages(messages) - + messages = await self._refresh_messages(messages, force_system_prompt_refresh=False) except Exception as e: self.logger.warning(f"Failed to refresh messages at step start: {e}") @@ -722,8 +932,8 @@ class LettaAgentV3(LettaAgentV2): or len([t for t in self.agent_state.tool_rules if t.type != "requires_approval"]) == 0 ) - # Anthropic/Bedrock parallel tool use - if self.agent_state.llm_config.model_endpoint_type in ["anthropic", "bedrock"]: + # Anthropic/Bedrock/MiniMax parallel tool use (MiniMax uses Anthropic-compatible API) + if self.agent_state.llm_config.model_endpoint_type in ["anthropic", "bedrock", "minimax"]: if ( isinstance(request_data.get("tool_choice"), dict) and "disable_parallel_tool_use" in request_data["tool_choice"] @@ -774,7 +984,6 @@ class LettaAgentV3(LettaAgentV2): async for chunk in invocation: if llm_adapter.supports_token_streaming(): if include_return_message_types is None or chunk.message_type in include_return_message_types: - first_chunk = True yield chunk # If you've reached this point without an error, break out of retry loop break @@ -790,35 +999,78 @@ class LettaAgentV3(LettaAgentV2): self.logger.info( f"Context window exceeded (error {e}), trying to compact messages attempt {llm_request_attempt + 1} of {summarizer_settings.max_summarizer_retries + 1}" ) - # checkpoint summarized messages - # TODO: might want to delay this checkpoint in case of corrupated state try: - summary_message, messages, _ = await self.compact( + # Capture pre-compaction state for metadata + context_tokens_before = self.context_token_estimate + messages_count_before = len(messages) + + # Yield event notification before compaction starts + if include_compaction_messages: + yield self._create_compaction_event_message( + step_id=step_id, + run_id=run_id, + trigger="context_window_exceeded", + ) + + # 
Ensure system prompt is recompiled before summarization so compaction + # operates on the latest system+memory state (including recent repairs). + # NOTE: we no longer refresh the system prompt before compaction so we can leverage cache for self mode + # messages = await self._refresh_messages(messages, force_system_prompt_refresh=True) + + summary_message, messages, summary_text = await self.compact( messages, trigger_threshold=self.agent_state.llm_config.context_window, run_id=run_id, step_id=step_id, + use_summary_role=include_compaction_messages, + trigger="context_window_exceeded", + context_tokens_before=context_tokens_before, + messages_count_before=messages_count_before, ) + + # Recompile the persisted system prompt after compaction so subsequent + # turns load the repaired system+memory state from message_ids[0]. + await self.agent_manager.rebuild_system_prompt_async( + agent_id=self.agent_state.id, + actor=self.actor, + force=True, + update_timestamp=True, + ) + # Force system prompt rebuild after compaction to update memory blocks and timestamps + messages = await self._refresh_messages(messages, force_system_prompt_refresh=True) self.logger.info("Summarization succeeded, continuing to retry LLM request") + + # Persist the summary message + self.response_messages.append(summary_message) + await self._checkpoint_messages( + run_id=run_id, + step_id=step_id, + new_messages=[summary_message], + in_context_messages=messages, + ) + + # Yield summary result message to client + for msg in self._create_summary_result_message( + summary_message=summary_message, + summary_text=summary_text, + step_id=step_id, + run_id=run_id, + include_compaction_messages=include_compaction_messages, + ): + yield msg + continue except SystemPromptTokenExceededError: + self.should_continue = False self.stop_reason = LettaStopReason( stop_reason=StopReasonType.context_window_overflow_in_system_prompt.value ) - raise e + raise except Exception as e: self.stop_reason = 
LettaStopReason(stop_reason=StopReasonType.error.value) self.logger.error(f"Unknown error occured for summarization run {run_id}: {e}") raise e - # update the messages - await self._checkpoint_messages( - run_id=run_id, - step_id=step_id, - new_messages=[summary_message], - in_context_messages=messages, - ) - else: self.stop_reason = LettaStopReason(stop_reason=StopReasonType.error.value) self.logger.error(f"Unknown error occured for run {run_id}: {e}") @@ -832,6 +1084,23 @@ class LettaAgentV3(LettaAgentV2): self.context_token_estimate = llm_adapter.usage.total_tokens self.logger.info(f"Context token estimate after LLM request: {self.context_token_estimate}") + # Extract logprobs if present (for RL training) + if llm_adapter.logprobs is not None: + self.logprobs = llm_adapter.logprobs + + # Track turn data for multi-turn RL training (SGLang native mode) + if self.return_token_ids and hasattr(llm_adapter, "output_ids") and llm_adapter.output_ids: + self.turns.append( + TurnTokenData( + role="assistant", + output_ids=llm_adapter.output_ids, + output_token_logprobs=llm_adapter.output_token_logprobs, + content=llm_adapter.chat_completions_response.choices[0].message.content + if llm_adapter.chat_completions_response + else None, + ) + ) + # Handle the AI response with the extracted data (supports multiple tool calls) # Gather tool calls - check for multi-call API first, then fall back to single if hasattr(llm_adapter, "tool_calls") and llm_adapter.tool_calls: @@ -878,6 +1147,36 @@ class LettaAgentV3(LettaAgentV2): self.response_messages.extend(new_messages) messages.extend(new_messages) + # Track tool return turns for multi-turn RL training + if self.return_token_ids: + for msg in new_messages: + if msg.role == "tool": + # Get tool return content + tool_content = None + tool_name = None + if hasattr(msg, "tool_returns") and msg.tool_returns: + # Aggregate all tool returns into content (func_response is the actual content) + parts = [] + for tr in msg.tool_returns: + if 
hasattr(tr, "func_response") and tr.func_response: + if isinstance(tr.func_response, str): + parts.append(tr.func_response) + else: + parts.append(str(tr.func_response)) + tool_content = "\n".join(parts) + elif hasattr(msg, "content") and msg.content: + tool_content = msg.content if isinstance(msg.content, str) else str(msg.content) + if hasattr(msg, "name"): + tool_name = msg.name + if tool_content: + self.turns.append( + TurnTokenData( + role="tool", + content=tool_content, + tool_name=tool_name, + ) + ) + # step(...) has successfully completed! now we can persist messages and update the in-context messages + save metrics # persistence needs to happen before streaming to minimize chances of agent getting into an inconsistent state step_progression, step_metrics = await self._step_checkpoint_finish(step_metrics, agent_step_span, logged_step) @@ -919,31 +1218,87 @@ class LettaAgentV3(LettaAgentV2): self.logger.info( f"Context window exceeded (current: {self.context_token_estimate}, threshold: {self.agent_state.llm_config.context_window}), trying to compact messages" ) - summary_message, messages, _ = await self.compact( - messages, - trigger_threshold=self.agent_state.llm_config.context_window, - run_id=run_id, - step_id=step_id, - ) - # TODO: persist + return the summary message - # TODO: convert this to a SummaryMessage - self.response_messages.append(summary_message) - for message in Message.to_letta_messages(summary_message): - yield message - await self._checkpoint_messages( - run_id=run_id, - step_id=step_id, - new_messages=[summary_message], - in_context_messages=messages, - ) + + # Capture pre-compaction state for metadata + context_tokens_before = self.context_token_estimate + messages_count_before = len(messages) + + # Yield event notification before compaction starts + if include_compaction_messages: + yield self._create_compaction_event_message( + step_id=step_id, + run_id=run_id, + trigger="post_step_context_check", + ) + + try: + # Ensure system 
prompt is recompiled before summarization so compaction + # operates on the latest system+memory state (including recent repairs). + # NOTE: we no longer refresh the system prompt before compaction so we can leverage cache for self mode + # messages = await self._refresh_messages(messages, force_system_prompt_refresh=True) + + summary_message, messages, summary_text = await self.compact( + messages, + trigger_threshold=self.agent_state.llm_config.context_window, + run_id=run_id, + step_id=step_id, + use_summary_role=include_compaction_messages, + trigger="post_step_context_check", + context_tokens_before=context_tokens_before, + messages_count_before=messages_count_before, + ) + + # Recompile the persisted system prompt after compaction so subsequent + # turns load the repaired system+memory state from message_ids[0]. + await self.agent_manager.rebuild_system_prompt_async( + agent_id=self.agent_state.id, + actor=self.actor, + force=True, + update_timestamp=True, + ) + # Force system prompt rebuild after compaction to update memory blocks and timestamps + messages = await self._refresh_messages(messages, force_system_prompt_refresh=True) + # TODO: persist + return the summary message + # TODO: convert this to a SummaryMessage + self.response_messages.append(summary_message) + + # Yield summary result message to client + for msg in self._create_summary_result_message( + summary_message=summary_message, + summary_text=summary_text, + step_id=step_id, + run_id=run_id, + include_compaction_messages=include_compaction_messages, + ): + yield msg + + await self._checkpoint_messages( + run_id=run_id, + step_id=step_id, + new_messages=[summary_message], + in_context_messages=messages, + ) + except SystemPromptTokenExceededError: + self.should_continue = False + self.stop_reason = LettaStopReason(stop_reason=StopReasonType.context_window_overflow_in_system_prompt.value) + raise except Exception as e: + caught_exception = e # NOTE: message persistence does not happen in the 
case of an exception (rollback to previous state) # Use repr() if str() is empty (happens with Exception() with no args) error_detail = str(e) or repr(e) self.logger.warning(f"Error during step processing: {error_detail}") self.job_update_metadata = {"error": error_detail} + # Stop the agent loop on any exception to prevent wasteful retry loops + # (e.g., if post-step compaction fails, we don't want to keep retrying) + self.should_continue = False + self.logger.warning( + f"Agent loop stopped due to exception (step_progression={step_progression.name}, " + f"exception_type={type(e).__name__}): {error_detail}" + ) + # This indicates we failed after we decided to stop stepping, which indicates a bug with our flow. if not self.stop_reason: self.stop_reason = LettaStopReason(stop_reason=StopReasonType.error.value) @@ -980,8 +1335,8 @@ class LettaAgentV3(LettaAgentV2): await self.step_manager.update_step_error_async( actor=self.actor, step_id=step_id, # Use original step_id for telemetry - error_type=type(e).__name__ if "e" in locals() else "Unknown", - error_message=str(e) if "e" in locals() else "Unknown error", + error_type=type(caught_exception).__name__ if caught_exception is not None else "Unknown", + error_message=str(caught_exception) if caught_exception is not None else "Unknown error", error_traceback=traceback.format_exc(), stop_reason=self.stop_reason, ) @@ -1318,10 +1673,10 @@ class LettaAgentV3(LettaAgentV2): # Decide continuation for this tool if has_prefill_error: cont = False - hb_reason = None + _hb_reason = None sr = LettaStopReason(stop_reason=StopReasonType.invalid_tool_call.value) else: - cont, hb_reason, sr = self._decide_continuation( + cont, _hb_reason, sr = self._decide_continuation( agent_state=self.agent_state, tool_call_name=spec["name"], tool_rule_violated=spec["violated"], @@ -1501,6 +1856,10 @@ class LettaAgentV3(LettaAgentV2): compaction_settings: Optional["CompactionSettings"] = None, run_id: Optional[str] = None, step_id: Optional[str] 
= None, + use_summary_role: bool = False, + trigger: Optional[str] = None, + context_tokens_before: Optional[int] = None, + messages_count_before: Optional[int] = None, ) -> tuple[Message, list[Message], str]: """Compact the current in-context messages for this agent. @@ -1508,223 +1867,41 @@ class LettaAgentV3(LettaAgentV2): ``compaction_settings.model`` when provided. This mirrors how agent creation derives defaults from provider-specific ModelSettings, but is localized to summarization. + + Args: + use_summary_role: If True, the summary message will be created with + role=summary instead of role=user. This enables first-class + summary message handling in the database and API responses. + trigger: What triggered the compaction (e.g., "context_window_exceeded", "post_step_context_check"). + context_tokens_before: Token count before compaction (for stats). + messages_count_before: Message count before compaction (for stats). """ - # Use the passed-in compaction_settings first, then agent's compaction_settings if set, - # otherwise fall back to global defaults based on the agent's model handle. 
- if compaction_settings is not None: - summarizer_config = compaction_settings - elif self.agent_state.compaction_settings is not None: - summarizer_config = self.agent_state.compaction_settings - else: - # Prefer the new handle field if set, otherwise derive from llm_config - if self.agent_state.model is not None: - handle = self.agent_state.model - else: - llm_cfg = self.agent_state.llm_config - handle = llm_cfg.handle or f"{llm_cfg.model_endpoint_type}/{llm_cfg.model}" + # Determine compaction settings: passed-in > agent's > global defaults + effective_compaction_settings = compaction_settings or self.agent_state.compaction_settings - summarizer_config = CompactionSettings(model=handle) - - # Build the LLMConfig used for summarization - summarizer_llm_config = await self._build_summarizer_llm_config( - agent_llm_config=self.agent_state.llm_config, - summarizer_config=summarizer_config, - ) - - summarization_mode_used = summarizer_config.mode - if summarizer_config.mode == "all": - summary, compacted_messages = await summarize_all( - actor=self.actor, - llm_config=summarizer_llm_config, - summarizer_config=summarizer_config, - in_context_messages=messages, - agent_id=self.agent_state.id, - agent_tags=self.agent_state.tags, - run_id=run_id, - step_id=step_id, - ) - elif summarizer_config.mode == "sliding_window": - try: - summary, compacted_messages = await summarize_via_sliding_window( - actor=self.actor, - llm_config=summarizer_llm_config, - summarizer_config=summarizer_config, - in_context_messages=messages, - agent_id=self.agent_state.id, - agent_tags=self.agent_state.tags, - run_id=run_id, - step_id=step_id, - ) - except Exception as e: - self.logger.error(f"Sliding window summarization failed with exception: {str(e)}. 
Falling back to all mode.") - summary, compacted_messages = await summarize_all( - actor=self.actor, - llm_config=summarizer_llm_config, - summarizer_config=summarizer_config, - in_context_messages=messages, - agent_id=self.agent_state.id, - agent_tags=self.agent_state.tags, - run_id=run_id, - step_id=step_id, - ) - summarization_mode_used = "all" - else: - raise ValueError(f"Invalid summarizer mode: {summarizer_config.mode}") - - # update the token count - self.context_token_estimate = await count_tokens( - actor=self.actor, llm_config=self.agent_state.llm_config, messages=compacted_messages - ) - self.logger.info(f"Context token estimate after summarization: {self.context_token_estimate}") - - # if the trigger_threshold is provided, we need to make sure that the new token count is below it - if trigger_threshold is not None and self.context_token_estimate is not None and self.context_token_estimate >= trigger_threshold: - # If even after summarization the context is still at or above - # the proactive summarization threshold, treat this as a hard - # failure: log loudly and evict all prior conversation state - # (keeping only the system message) to avoid getting stuck in - # repeated summarization loops. - self.logger.error( - "Summarization failed to sufficiently reduce context size: " - f"post-summarization tokens={self.context_token_estimate}, " - f"threshold={trigger_threshold}, context_window={self.context_token_estimate}. 
" - "Evicting all prior messages without a summary to break potential loops.", - ) - - # if we used the sliding window mode, try to summarize again with the all mode - if summarization_mode_used == "sliding_window": - # try to summarize again with the all mode - summary, compacted_messages = await summarize_all( - actor=self.actor, - llm_config=self.agent_state.llm_config, - summarizer_config=summarizer_config, - in_context_messages=compacted_messages, - agent_id=self.agent_state.id, - agent_tags=self.agent_state.tags, - run_id=run_id, - step_id=step_id, - ) - summarization_mode_used = "all" - - self.context_token_estimate = await count_tokens( - actor=self.actor, llm_config=self.agent_state.llm_config, messages=compacted_messages - ) - - # final edge case: the system prompt is the cause of the context overflow (raise error) - if self.context_token_estimate is not None and self.context_token_estimate >= trigger_threshold: - await self._check_for_system_prompt_overflow(compacted_messages[0]) - - # raise an error if this is STILL not the problem - # do not throw an error, since we don't want to brick the agent - self.logger.error( - f"Failed to summarize messages after hard eviction and checking the system prompt token estimate: {self.context_token_estimate} > {trigger_threshold}" - ) - else: - self.logger.info( - f"Summarization fallback succeeded in bringing the context size below the trigger threshold: {self.context_token_estimate} < {trigger_threshold}" - ) - - # Persist the summary message to DB - summary_message_str_packed = package_summarize_message_no_counts( - summary=summary, - timezone=self.agent_state.timezone, - ) - summary_messages = await convert_message_creates_to_messages( - message_creates=[ - MessageCreate( - role=MessageRole.user, - content=[TextContent(text=summary_message_str_packed)], - ) - ], + result = await compact_messages( + actor=self.actor, agent_id=self.agent_state.id, + agent_llm_config=self.agent_state.llm_config, + 
telemetry_manager=self.telemetry_manager, + llm_client=self.llm_client, + agent_type=self.agent_state.agent_type, + messages=messages, timezone=self.agent_state.timezone, - # We already packed, don't pack again - wrap_user_message=False, - wrap_system_message=False, - run_id=None, # TODO: add this + compaction_settings=effective_compaction_settings, + agent_tags=self.agent_state.tags, + tools=await self._get_valid_tools(), # Pass json schemas including client tools for cache compatibility (for self compaction) + trigger_threshold=trigger_threshold, + run_id=run_id, + step_id=step_id, + use_summary_role=use_summary_role, + trigger=trigger, + context_tokens_before=context_tokens_before, + messages_count_before=messages_count_before, ) - if not len(summary_messages) == 1: - self.logger.error(f"Expected only one summary message, got {len(summary_messages)} in {summary_messages}") - summary_message_obj = summary_messages[0] - # final messages: inject summarization message at the beginning - final_messages = [compacted_messages[0]] + [summary_message_obj] - if len(compacted_messages) > 1: - final_messages += compacted_messages[1:] + # Update the agent's context token estimate + self.context_token_estimate = result.context_token_estimate - return summary_message_obj, final_messages, summary - - async def _build_summarizer_llm_config( - self, - agent_llm_config: LLMConfig, - summarizer_config: CompactionSettings, - ) -> LLMConfig: - """Derive an LLMConfig for summarization from a model handle. - - This mirrors the agent-creation path: start from the agent's LLMConfig, - override provider/model/handle from ``compaction_settings.model``, and - then apply any explicit ``compaction_settings.model_settings`` via - ``_to_legacy_config_params``. 
- """ - - # If no summarizer model handle is provided, fall back to the agent's config - if not summarizer_config.model: - return agent_llm_config - - try: - # Parse provider/model from the handle, falling back to the agent's - # provider type when only a model name is given. - if "/" in summarizer_config.model: - provider_name, model_name = summarizer_config.model.split("/", 1) - else: - provider_name = agent_llm_config.provider_name - model_name = summarizer_config.model - - # Start from the agent's config and override model + provider_name + handle - # Check if the summarizer's provider matches the agent's provider - # If they match, we can safely use the agent's config as a base - # If they don't match, we need to load the default config for the new provider - from letta.schemas.enums import ProviderType - - provider_matches = False - try: - # Check if provider_name is a valid ProviderType that matches agent's endpoint type - provider_type = ProviderType(provider_name) - provider_matches = provider_type.value == agent_llm_config.model_endpoint_type - except ValueError: - # provider_name is a custom label - check if it matches agent's provider_name - provider_matches = provider_name == agent_llm_config.provider_name - - if provider_matches: - # Same provider - use agent's config as base and override model/handle - base = agent_llm_config.model_copy() - base.model = model_name - base.handle = summarizer_config.model - else: - # Different provider - load default config for this handle - from letta.services.provider_manager import ProviderManager - - provider_manager = ProviderManager() - try: - base = await provider_manager.get_llm_config_from_handle( - handle=summarizer_config.model, - actor=self.actor, - ) - except Exception as e: - self.logger.warning( - f"Failed to load LLM config for summarizer handle '{summarizer_config.model}': {e}. " - f"Falling back to agent's LLM config." 
- ) - return agent_llm_config - - # If explicit model_settings are provided for the summarizer, apply - # them just like server.create_agent_async does for agents. - if summarizer_config.model_settings is not None: - update_params = summarizer_config.model_settings._to_legacy_config_params() - return base.model_copy(update=update_params) - - return base - except Exception: - # On any error, do not break the agent – just fall back - return agent_llm_config + return result.summary_message, result.compacted_messages, result.summary_text diff --git a/letta/agents/voice_agent.py b/letta/agents/voice_agent.py index 068d6f3e..1bf41729 100644 --- a/letta/agents/voice_agent.py +++ b/letta/agents/voice_agent.py @@ -1,10 +1,13 @@ import json import uuid from datetime import datetime, timedelta, timezone -from typing import Any, AsyncGenerator, Dict, List, Optional +from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, List, Optional import openai +if TYPE_CHECKING: + from letta.schemas.tool_execution_result import ToolExecutionResult + from letta.agents.base_agent import BaseAgent from letta.agents.exceptions import IncompatibleAgentType from letta.agents.voice_sleeptime_agent import VoiceSleeptimeAgent @@ -250,7 +253,6 @@ class VoiceAgent(BaseAgent): agent_state=agent_state, ) tool_result = tool_execution_result.func_return - success_flag = tool_execution_result.success_flag # 3. 
Provide function_call response back into the conversation # TODO: fix this tool format @@ -292,7 +294,7 @@ class VoiceAgent(BaseAgent): new_letta_messages = await self.message_manager.create_many_messages_async(letta_message_db_queue, actor=self.actor) # TODO: Make this more general and configurable, less brittle - new_in_context_messages, updated = await summarizer.summarize( + new_in_context_messages, _updated = await summarizer.summarize( in_context_messages=in_context_messages, new_letta_messages=new_letta_messages ) diff --git a/letta/agents/voice_sleeptime_agent.py b/letta/agents/voice_sleeptime_agent.py index fbd5d145..6f7f184f 100644 --- a/letta/agents/voice_sleeptime_agent.py +++ b/letta/agents/voice_sleeptime_agent.py @@ -1,4 +1,9 @@ -from typing import AsyncGenerator, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, AsyncGenerator, List, Optional, Tuple, Union + +if TYPE_CHECKING: + from opentelemetry.trace import Span + + from letta.schemas.tool_execution_result import ToolExecutionResult from letta.agents.helpers import _create_letta_response, serialize_message_history from letta.agents.letta_agent import LettaAgent @@ -89,7 +94,7 @@ class VoiceSleeptimeAgent(LettaAgent): current_in_context_messages, new_in_context_messages, stop_reason, usage = await super()._step( agent_state=agent_state, input_messages=input_messages, max_steps=max_steps ) - new_in_context_messages, updated = await self.summarizer.summarize( + new_in_context_messages, _updated = await self.summarizer.summarize( in_context_messages=current_in_context_messages, new_letta_messages=new_in_context_messages ) self.agent_manager.set_in_context_messages( diff --git a/letta/cli/cli.py b/letta/cli/cli.py index 47e86509..e566ca62 100644 --- a/letta/cli/cli.py +++ b/letta/cli/cli.py @@ -5,7 +5,6 @@ from typing import Annotated, Optional import typer from letta.log import get_logger -from letta.streaming_interface import StreamingRefreshCLIInterface as interface # for printing to 
terminal logger = get_logger(__name__) diff --git a/letta/config_file.py b/letta/config_file.py new file mode 100644 index 00000000..71c00a7c --- /dev/null +++ b/letta/config_file.py @@ -0,0 +1,232 @@ +""" +Letta Configuration File Support + +Loads hierarchical YAML config and maps it to environment variables. + +Supported top-level keys and their env var prefixes: + letta: -> LETTA_* + model: -> * (provider-prefixed: OPENAI_*, ANTHROPIC_*, etc.) + tool: -> * (prefix-based: E2B_*, MCP_*, TOOL_*, etc.) + datadog: -> DD_* + +Config file format: + letta: + telemetry: + enable_datadog: true + pg: + host: localhost + model: + openai: + api_key: sk-xxx + anthropic: + api_key: sk-yyy + tool: + e2b: + api_key: xxx + mcp: + disable_stdio: true + datadog: + site: us5.datadoghq.com + service: memgpt-server + +This maps to environment variables: + LETTA_TELEMETRY_ENABLE_DATADOG=true + LETTA_PG_HOST=localhost + OPENAI_API_KEY=sk-xxx + ANTHROPIC_API_KEY=sk-yyy + E2B_API_KEY=xxx + MCP_DISABLE_STDIO=true + DD_SITE=us5.datadoghq.com + DD_SERVICE=memgpt-server + +Config file locations (in order of precedence): + 1. ~/.letta/conf.yaml + 2. ./conf.yaml + 3. LETTA_CONFIG_PATH environment variable +""" + +import os +from pathlib import Path +from typing import Any + +import yaml + +# Config file locations +DEFAULT_USER_CONFIG = Path.home() / ".letta" / "conf.yaml" +DEFAULT_PROJECT_CONFIG = Path.cwd() / "conf.yaml" + + +def load_config_file(config_path: str | Path | None = None) -> dict[str, Any]: + """ + Load configuration from YAML file. 
+ + Args: + config_path: Optional explicit path to config file + + Returns: + Loaded config dict, or empty dict if no config found + """ + paths_to_check = [] + + # Check in order of precedence (lowest to highest) + if DEFAULT_USER_CONFIG.exists(): + paths_to_check.append(DEFAULT_USER_CONFIG) + + if DEFAULT_PROJECT_CONFIG.exists(): + paths_to_check.append(DEFAULT_PROJECT_CONFIG) + + # Environment variable override + env_path = os.environ.get("LETTA_CONFIG_PATH") + if env_path and Path(env_path).exists(): + paths_to_check.append(Path(env_path)) + + # Explicit path has highest precedence + if config_path: + p = Path(config_path) + if p.exists(): + paths_to_check.append(p) + + # Merge configs (later files override earlier) + config: dict[str, Any] = {} + for path in paths_to_check: + try: + with open(path, "r") as f: + file_config = yaml.safe_load(f) + if file_config: + config = _deep_merge(config, file_config) + except Exception: + pass + + return config + + +def _deep_merge(base: dict, override: dict) -> dict: + """Deep merge two dicts, override values take precedence.""" + result = base.copy() + for key, value in override.items(): + if key in result and isinstance(result[key], dict) and isinstance(value, dict): + result[key] = _deep_merge(result[key], value) + else: + result[key] = value + return result + + +def _flatten_with_prefix(d: dict, prefix: str, env_vars: dict[str, str]) -> None: + """Flatten a dict with a given prefix.""" + for key, value in d.items(): + env_key = f"{prefix}_{key}".upper() if prefix else key.upper() + if isinstance(value, dict): + _flatten_with_prefix(value, env_key, env_vars) + elif value is not None: + if isinstance(value, bool): + env_vars[env_key] = str(value).lower() + else: + env_vars[env_key] = str(value) + + +def _flatten_model_settings(d: dict, env_vars: dict[str, str]) -> None: + """ + Flatten model settings where nested keys become prefixes. 
+ + model: + openai: + api_key: xxx -> OPENAI_API_KEY + api_base: yyy -> OPENAI_API_BASE + anthropic: + api_key: zzz -> ANTHROPIC_API_KEY + global_max_context_window_limit: 32000 -> GLOBAL_MAX_CONTEXT_WINDOW_LIMIT + """ + for key, value in d.items(): + if isinstance(value, dict): + # Nested provider config: openai.api_key -> OPENAI_API_KEY + _flatten_with_prefix(value, key.upper(), env_vars) + elif value is not None: + # Top-level model setting + env_key = key.upper() + if isinstance(value, bool): + env_vars[env_key] = str(value).lower() + else: + env_vars[env_key] = str(value) + + +def _flatten_tool_settings(d: dict, env_vars: dict[str, str]) -> None: + """ + Flatten tool settings where nested keys become prefixes. + + tool: + e2b: + api_key: xxx -> E2B_API_KEY + sandbox_template_id: y -> E2B_SANDBOX_TEMPLATE_ID + mcp: + disable_stdio: true -> MCP_DISABLE_STDIO + tool_sandbox_timeout: 180 -> TOOL_SANDBOX_TIMEOUT + """ + for key, value in d.items(): + if isinstance(value, dict): + # Nested tool config: e2b.api_key -> E2B_API_KEY + _flatten_with_prefix(value, key.upper(), env_vars) + elif value is not None: + # Top-level tool setting + env_key = key.upper() + if isinstance(value, bool): + env_vars[env_key] = str(value).lower() + else: + env_vars[env_key] = str(value) + + +def config_to_env_vars(config: dict[str, Any]) -> dict[str, str]: + """ + Convert hierarchical config to flat environment variables. + + Supports multiple top-level keys with different prefix behaviors: + - letta: -> LETTA_* prefix + - model: -> provider-prefixed (OPENAI_*, ANTHROPIC_*, etc.) + - tool: -> prefix-based (E2B_*, MCP_*, TOOL_*, etc.) 
+ - datadog: -> DD_* prefix + + Args: + config: Hierarchical config dict + + Returns: + Dict of environment variable name -> value + """ + env_vars: dict[str, str] = {} + + # Handle 'letta' section with LETTA_ prefix + if "letta" in config: + _flatten_with_prefix(config["letta"], "LETTA", env_vars) + + # Handle 'model' section (provider-prefixed env vars) + if "model" in config: + _flatten_model_settings(config["model"], env_vars) + + # Handle 'tool' section (prefix-based env vars) + if "tool" in config: + _flatten_tool_settings(config["tool"], env_vars) + + # Handle 'datadog' section with DD_ prefix + if "datadog" in config: + _flatten_with_prefix(config["datadog"], "DD", env_vars) + + return env_vars + + +def apply_config_to_env(config_path: str | Path | None = None) -> None: + """ + Load config file and apply values to environment variables. + + Environment variables already set take precedence over config file values. + + Args: + config_path: Optional explicit path to config file + """ + config = load_config_file(config_path) + if not config: + return + + env_vars = config_to_env_vars(config) + + for key, value in env_vars.items(): + # Only set if not already in environment (env vars take precedence) + if key not in os.environ: + os.environ[key] = value diff --git a/letta/constants.py b/letta/constants.py index a6b53c6d..e36b0b32 100644 --- a/letta/constants.py +++ b/letta/constants.py @@ -78,7 +78,7 @@ DEFAULT_CONTEXT_WINDOW = 32000 # Summarization trigger threshold (multiplier of context_window limit) # Summarization triggers when step usage > context_window * SUMMARIZATION_TRIGGER_MULTIPLIER -SUMMARIZATION_TRIGGER_MULTIPLIER = 1.0 +SUMMARIZATION_TRIGGER_MULTIPLIER = 0.9 # using instead of 1.0 to avoid "too many tokens in prompt" fallbacks # number of concurrent embedding requests to sent EMBEDDING_BATCH_SIZE = 200 @@ -252,8 +252,11 @@ LLM_MAX_CONTEXT_WINDOW = { "deepseek-chat": 64000, "deepseek-reasoner": 64000, # glm (Z.AI) - "glm-4.6": 200000, "glm-4.5": 
128000, + "glm-4.6": 200000, + "glm-4.7": 200000, + "glm-5": 200000, + "glm-5-code": 200000, ## OpenAI models: https://platform.openai.com/docs/models/overview # gpt-5 "gpt-5": 272000, @@ -383,6 +386,7 @@ LLM_MAX_CONTEXT_WINDOW = { "gemini-2.5-computer-use-preview-10-2025": 1048576, # gemini 3 "gemini-3-pro-preview": 1048576, + "gemini-3.1-pro-preview": 1048576, "gemini-3-flash-preview": 1048576, # gemini latest aliases "gemini-flash-latest": 1048576, @@ -457,10 +461,18 @@ REDIS_RUN_ID_PREFIX = "agent:send_message:run_id" CONVERSATION_LOCK_PREFIX = "conversation:lock:" CONVERSATION_LOCK_TTL_SECONDS = 300 # 5 minutes +# Memory repo locks - prevents concurrent modifications to git-based memory +MEMORY_REPO_LOCK_PREFIX = "memory_repo:lock:" +MEMORY_REPO_LOCK_TTL_SECONDS = 60 # 1 minute (git operations should be fast) + # TODO: This is temporary, eventually use token-based eviction # File based controls DEFAULT_MAX_FILES_OPEN = 5 DEFAULT_CORE_MEMORY_SOURCE_CHAR_LIMIT: int = 50000 +# Max values for file controls (int32 limit to match database INTEGER type) +MAX_INT32: int = 2147483647 +MAX_PER_FILE_VIEW_WINDOW_CHAR_LIMIT: int = MAX_INT32 +MAX_FILES_OPEN_LIMIT: int = 1000 # Practical limit - no agent needs 1000+ files open GET_PROVIDERS_TIMEOUT_SECONDS = 10 diff --git a/letta/data_sources/connectors.py b/letta/data_sources/connectors.py index cfafe2a2..1cfd2b21 100644 --- a/letta/data_sources/connectors.py +++ b/letta/data_sources/connectors.py @@ -1,4 +1,7 @@ -from typing import Dict, Iterator, List, Tuple +from typing import TYPE_CHECKING, Dict, Iterator, List, Tuple + +if TYPE_CHECKING: + from letta.schemas.user import User import typer @@ -143,7 +146,13 @@ async def load_data(connector: DataConnector, source: Source, passage_manager: P class DirectoryConnector(DataConnector): - def __init__(self, input_files: List[str] = None, input_directory: str = None, recursive: bool = False, extensions: List[str] = None): + def __init__( + self, + input_files: List[str] | None = 
None, + input_directory: str | None = None, + recursive: bool = False, + extensions: List[str] | None = None, + ): """ Connector for reading text data from a directory of files. diff --git a/letta/data_sources/redis_client.py b/letta/data_sources/redis_client.py index 8c4f5fec..c1f7a098 100644 --- a/letta/data_sources/redis_client.py +++ b/letta/data_sources/redis_client.py @@ -2,8 +2,16 @@ import asyncio from functools import wraps from typing import Any, Dict, List, Optional, Set, Union -from letta.constants import CONVERSATION_LOCK_PREFIX, CONVERSATION_LOCK_TTL_SECONDS, REDIS_EXCLUDE, REDIS_INCLUDE, REDIS_SET_DEFAULT_VAL -from letta.errors import ConversationBusyError +from letta.constants import ( + CONVERSATION_LOCK_PREFIX, + CONVERSATION_LOCK_TTL_SECONDS, + MEMORY_REPO_LOCK_PREFIX, + MEMORY_REPO_LOCK_TTL_SECONDS, + REDIS_EXCLUDE, + REDIS_INCLUDE, + REDIS_SET_DEFAULT_VAL, +) +from letta.errors import ConversationBusyError, MemoryRepoBusyError from letta.log import get_logger from letta.settings import settings @@ -141,7 +149,7 @@ class AsyncRedisClient: try: client = await self.get_client() return await client.get(key) - except: + except Exception: return default @with_retry() @@ -230,6 +238,64 @@ class AsyncRedisClient: logger.warning(f"Failed to release conversation lock for conversation {conversation_id}: {e}") return False + async def acquire_memory_repo_lock( + self, + agent_id: str, + token: str, + ) -> Optional["Lock"]: + """ + Acquire a distributed lock for a memory repository. + + Prevents concurrent modifications to an agent's git-based memory. 
+ + Args: + agent_id: The agent ID whose memory is being modified + token: Unique identifier for the lock holder (for debugging/tracing) + + Returns: + Lock object if acquired, raises MemoryRepoBusyError if in use + """ + if Lock is None: + return None + client = await self.get_client() + lock_key = f"{MEMORY_REPO_LOCK_PREFIX}{agent_id}" + lock = Lock( + client, + lock_key, + timeout=MEMORY_REPO_LOCK_TTL_SECONDS, + blocking=False, + thread_local=False, + raise_on_release_error=False, + ) + + if await lock.acquire(token=token): + return lock + + lock_holder_token = await client.get(lock_key) + raise MemoryRepoBusyError( + agent_id=agent_id, + lock_holder_token=lock_holder_token, + ) + + async def release_memory_repo_lock(self, agent_id: str) -> bool: + """ + Release a memory repo lock by agent_id. + + Args: + agent_id: The agent ID to release the lock for + + Returns: + True if lock was released, False if release failed + """ + try: + client = await self.get_client() + lock_key = f"{MEMORY_REPO_LOCK_PREFIX}{agent_id}" + await client.delete(lock_key) + return True + except Exception as e: + logger.warning(f"Failed to release memory repo lock for agent {agent_id}: {e}") + return False + @with_retry() async def exists(self, *keys: str) -> int: """Check if keys exist.""" @@ -254,7 +320,7 @@ class AsyncRedisClient: client = await self.get_client() result = await client.smismember(key, values) return result if isinstance(values, list) else result[0] - except: + except Exception: return [0] * len(values) if isinstance(values, list) else 0 async def srem(self, key: str, *members: Union[str, int, float]) -> int: @@ -464,6 +530,16 @@ class NoopAsyncRedisClient(AsyncRedisClient): async def release_conversation_lock(self, conversation_id: str) -> bool: return False + async def acquire_memory_repo_lock( + self, + agent_id: str, + token: str, + ) -> Optional["Lock"]: + return None + + async def release_memory_repo_lock(self, agent_id: str) -> bool: + return False + async def 
check_inclusion_and_exclusion(self, member: str, group: str) -> bool: return False diff --git a/letta/errors.py b/letta/errors.py index 5a2eb849..f725b2b5 100644 --- a/letta/errors.py +++ b/letta/errors.py @@ -4,6 +4,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Union # Avoid circular imports if TYPE_CHECKING: + from letta.schemas.letta_message import LettaMessage from letta.schemas.message import Message @@ -20,6 +21,7 @@ class ErrorCode(Enum): TIMEOUT = "TIMEOUT" CONFLICT = "CONFLICT" EXPIRED = "EXPIRED" + PAYMENT_REQUIRED = "PAYMENT_REQUIRED" class LettaError(Exception): @@ -91,6 +93,22 @@ class ConversationBusyError(LettaError): super().__init__(message=message, code=code, details=details) +class MemoryRepoBusyError(LettaError): + """Error raised when attempting to modify memory while another operation is in progress.""" + + def __init__(self, agent_id: str, lock_holder_token: Optional[str] = None): + self.agent_id = agent_id + self.lock_holder_token = lock_holder_token + message = "Cannot modify memory: Another operation is currently in progress for this agent's memory. Please wait for the current operation to complete." 
+ code = ErrorCode.CONFLICT + details = { + "error_code": "MEMORY_REPO_BUSY", + "agent_id": agent_id, + "lock_holder_token": lock_holder_token, + } + super().__init__(message=message, code=code, details=details) + + class LettaToolCreateError(LettaError): """Error raised when a tool cannot be created.""" @@ -167,7 +185,9 @@ class LettaImageFetchError(LettaError): def __init__(self, url: str, reason: str): details = {"url": url, "reason": reason} super().__init__( - message=f"Failed to fetch image from {url}: {reason}", code=ErrorCode.INVALID_ARGUMENT, details=details, + message=f"Failed to fetch image from {url}: {reason}", + code=ErrorCode.INVALID_ARGUMENT, + details=details, ) @@ -238,6 +258,10 @@ class LLMBadRequestError(LLMError): """Error when LLM service cannot process request""" +class LLMInsufficientCreditsError(LLMError): + """Error when LLM provider reports insufficient credits or quota""" + + class LLMAuthenticationError(LLMError): """Error when authentication fails with LLM service""" @@ -308,7 +332,9 @@ class ContextWindowExceededError(LettaError): def __init__(self, message: str, details: dict = {}): error_message = f"{message} ({details})" super().__init__( - message=error_message, code=ErrorCode.CONTEXT_WINDOW_EXCEEDED, details=details, + message=error_message, + code=ErrorCode.CONTEXT_WINDOW_EXCEEDED, + details=details, ) @@ -328,7 +354,9 @@ class RateLimitExceededError(LettaError): def __init__(self, message: str, max_retries: int): error_message = f"{message} ({max_retries})" super().__init__( - message=error_message, code=ErrorCode.RATE_LIMIT_EXCEEDED, details={"max_retries": max_retries}, + message=error_message, + code=ErrorCode.RATE_LIMIT_EXCEEDED, + details={"max_retries": max_retries}, ) @@ -383,7 +411,8 @@ class HandleNotFoundError(LettaError): def __init__(self, handle: str, available_handles: List[str]): super().__init__( - message=f"Handle {handle} not found, must be one of {available_handles}", code=ErrorCode.NOT_FOUND, + 
message=f"Handle {handle} not found, must be one of {available_handles}", + code=ErrorCode.NOT_FOUND, ) @@ -423,6 +452,16 @@ class AgentFileImportError(Exception): """Exception raised during agent file import operations""" +class InsufficientCreditsError(LettaError): + """Raised when an organization has no remaining credits.""" + + def __init__(self): + super().__init__( + message="Insufficient credits to process this request.", + details={"error_code": "INSUFFICIENT_CREDITS"}, + ) + + class RunCancelError(LettaError): """Error raised when a run cannot be cancelled.""" diff --git a/letta/functions/function_sets/base.py b/letta/functions/function_sets/base.py index 45cede97..56e79183 100644 --- a/letta/functions/function_sets/base.py +++ b/letta/functions/function_sets/base.py @@ -1,10 +1,11 @@ -from typing import TYPE_CHECKING, Any, List, Literal, Optional - -from letta.constants import CORE_MEMORY_LINE_NUMBER_WARNING +from typing import TYPE_CHECKING, List, Literal, Optional if TYPE_CHECKING: + from letta.agents.letta_agent import LettaAgent as Agent from letta.schemas.agent import AgentState +from letta.constants import CORE_MEMORY_LINE_NUMBER_WARNING + def memory( agent_state: "AgentState", @@ -242,7 +243,7 @@ async def archival_memory_search( raise NotImplementedError("This should never be invoked directly. Contact Letta if you see this error message.") -def core_memory_append(agent_state: "AgentState", label: str, content: str) -> Optional[str]: # type: ignore +def core_memory_append(agent_state: "AgentState", label: str, content: str) -> str: # type: ignore """ Append to the contents of core memory. @@ -251,15 +252,15 @@ def core_memory_append(agent_state: "AgentState", label: str, content: str) -> O content (str): Content to write to the memory. All unicode (including emojis) are supported. Returns: - Optional[str]: None is always returned as this function does not produce a response. + str: The updated value of the memory block. 
""" current_value = str(agent_state.memory.get_block(label).value) new_value = current_value + "\n" + str(content) agent_state.memory.update_block_value(label=label, value=new_value) - return None + return new_value -def core_memory_replace(agent_state: "AgentState", label: str, old_content: str, new_content: str) -> Optional[str]: # type: ignore +def core_memory_replace(agent_state: "AgentState", label: str, old_content: str, new_content: str) -> str: # type: ignore """ Replace the contents of core memory. To delete memories, use an empty string for new_content. @@ -269,14 +270,14 @@ def core_memory_replace(agent_state: "AgentState", label: str, old_content: str, new_content (str): Content to write to the memory. All unicode (including emojis) are supported. Returns: - Optional[str]: None is always returned as this function does not produce a response. + str: The updated value of the memory block. """ current_value = str(agent_state.memory.get_block(label).value) if old_content not in current_value: raise ValueError(f"Old content '{old_content}' not found in memory block '{label}'") new_value = current_value.replace(str(old_content), str(new_content)) agent_state.memory.update_block_value(label=label, value=new_value) - return None + return new_value def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_label: str) -> None: @@ -307,125 +308,118 @@ SNIPPET_LINES: int = 4 # Based off of: https://github.com/anthropics/anthropic-quickstarts/blob/main/computer-use-demo/computer_use_demo/tools/edit.py?ref=musings.yasyf.com#L154 -def memory_replace(agent_state: "AgentState", label: str, old_str: str, new_str: str) -> str: # type: ignore +def memory_replace(agent_state: "AgentState", label: str, old_string: str, new_string: str) -> str: # type: ignore """ The memory_replace command allows you to replace a specific string in a memory block with a new string. This is used for making precise edits. Do NOT attempt to replace long strings, e.g. 
do not attempt to replace the entire contents of a memory block with a new string. Args: label (str): Section of the memory to be edited, identified by its label. - old_str (str): The text to replace (must match exactly, including whitespace and indentation). - new_str (str): The new text to insert in place of the old text. Do not include line number prefixes. + old_string (str): The text to replace (must match exactly, including whitespace and indentation). + new_string (str): The new text to insert in place of the old text. Do not include line number prefixes. Examples: # Update a block containing information about the user - memory_replace(label="human", old_str="Their name is Alice", new_str="Their name is Bob") + memory_replace(label="human", old_string="Their name is Alice", new_string="Their name is Bob") # Update a block containing a todo list - memory_replace(label="todos", old_str="- [ ] Step 5: Search the web", new_str="- [x] Step 5: Search the web") + memory_replace(label="todos", old_string="- [ ] Step 5: Search the web", new_string="- [x] Step 5: Search the web") # Pass an empty string to - memory_replace(label="human", old_str="Their name is Alice", new_str="") + memory_replace(label="human", old_string="Their name is Alice", new_string="") # Bad example - do NOT add (view-only) line numbers to the args - memory_replace(label="human", old_str="1: Their name is Alice", new_str="1: Their name is Bob") + memory_replace(label="human", old_string="1: Their name is Alice", new_string="1: Their name is Bob") # Bad example - do NOT include the line number warning either - memory_replace(label="human", old_str="# NOTE: Line numbers shown below (with arrows like '1→') are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\\n1→ Their name is Alice", new_str="1→ Their name is Bob") + memory_replace(label="human", old_string="# NOTE: Line numbers shown below (with arrows like '1→') are to help during editing. 
Do NOT include line number prefixes in your memory edit tool calls.\\n1→ Their name is Alice", new_string="1→ Their name is Bob") # Good example - no line numbers or line number warning (they are view-only), just the text - memory_replace(label="human", old_str="Their name is Alice", new_str="Their name is Bob") + memory_replace(label="human", old_string="Their name is Alice", new_string="Their name is Bob") Returns: - str: The success message + str: The updated value of the memory block. """ import re - if bool(re.search(r"\nLine \d+: ", old_str)): + if bool(re.search(r"\nLine \d+: ", old_string)): raise ValueError( - "old_str contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)." + "old_string contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)." ) - if CORE_MEMORY_LINE_NUMBER_WARNING in old_str: + if CORE_MEMORY_LINE_NUMBER_WARNING in old_string: raise ValueError( - "old_str contains a line number warning, which is not allowed. Do not include line number information when calling memory tools (line numbers are for display purposes only)." + "old_string contains a line number warning, which is not allowed. Do not include line number information when calling memory tools (line numbers are for display purposes only)." ) - if bool(re.search(r"\nLine \d+: ", new_str)): + if bool(re.search(r"\nLine \d+: ", new_string)): raise ValueError( - "new_str contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)." + "new_string contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)." 
) - old_str = str(old_str).expandtabs() - new_str = str(new_str).expandtabs() + old_string = str(old_string).expandtabs() + new_string = str(new_string).expandtabs() current_value = str(agent_state.memory.get_block(label).value).expandtabs() - # Check if old_str is unique in the block - occurences = current_value.count(old_str) + # Check if old_string is unique in the block + occurences = current_value.count(old_string) if occurences == 0: - raise ValueError(f"No replacement was performed, old_str `{old_str}` did not appear verbatim in memory block with label `{label}`.") + raise ValueError( + f"No replacement was performed, old_string `{old_string}` did not appear verbatim in memory block with label `{label}`." + ) elif occurences > 1: content_value_lines = current_value.split("\n") - lines = [idx + 1 for idx, line in enumerate(content_value_lines) if old_str in line] + lines = [idx + 1 for idx, line in enumerate(content_value_lines) if old_string in line] raise ValueError( - f"No replacement was performed. Multiple occurrences of old_str `{old_str}` in lines {lines}. Please ensure it is unique." + f"No replacement was performed. Multiple occurrences of old_string `{old_string}` in lines {lines}. Please ensure it is unique." 
) - # Replace old_str with new_str - new_value = current_value.replace(str(old_str), str(new_str)) + # Replace old_string with new_string + new_value = current_value.replace(str(old_string), str(new_string)) # Write the new content to the block agent_state.memory.update_block_value(label=label, value=new_value) # Create a snippet of the edited section # SNIPPET_LINES = 3 - # replacement_line = current_value.split(old_str)[0].count("\n") + # replacement_line = current_value.split(old_string)[0].count("\n") # start_line = max(0, replacement_line - SNIPPET_LINES) - # end_line = replacement_line + SNIPPET_LINES + new_str.count("\n") + # end_line = replacement_line + SNIPPET_LINES + new_string.count("\n") # snippet = "\n".join(new_value.split("\n")[start_line : end_line + 1]) - # Prepare the success message - success_msg = ( - f"The core memory block with label `{label}` has been successfully edited. " - f"Your system prompt has been recompiled with the updated memory contents and is now active in your context. " - f"Review the changes and make sure they are as expected (correct indentation, " - f"no duplicate lines, etc). Edit the memory block again if necessary." - ) - - # return None - return success_msg + return new_value -def memory_insert(agent_state: "AgentState", label: str, new_str: str, insert_line: int = -1) -> Optional[str]: # type: ignore +def memory_insert(agent_state: "AgentState", label: str, new_string: str, insert_line: int = -1) -> str: # type: ignore """ The memory_insert command allows you to insert text at a specific location in a memory block. Args: label (str): Section of the memory to be edited, identified by its label. - new_str (str): The text to insert. Do not include line number prefixes. + new_string (str): The text to insert. Do not include line number prefixes. insert_line (int): The line number after which to insert the text (0 for beginning of file). Defaults to -1 (end of the file). 
Examples: # Update a block containing information about the user (append to the end of the block) - memory_insert(label="customer", new_str="The customer's ticket number is 12345") + memory_insert(label="customer", new_string="The customer's ticket number is 12345") # Update a block containing information about the user (insert at the beginning of the block) - memory_insert(label="customer", new_str="The customer's ticket number is 12345", insert_line=0) + memory_insert(label="customer", new_string="The customer's ticket number is 12345", insert_line=0) Returns: Optional[str]: None is always returned as this function does not produce a response. """ import re - if bool(re.search(r"\nLine \d+: ", new_str)): + if bool(re.search(r"\nLine \d+: ", new_string)): raise ValueError( - "new_str contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)." + "new_string contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)." ) - if CORE_MEMORY_LINE_NUMBER_WARNING in new_str: + if CORE_MEMORY_LINE_NUMBER_WARNING in new_string: raise ValueError( - "new_str contains a line number warning, which is not allowed. Do not include line number information when calling memory tools (line numbers are for display purposes only)." + "new_string contains a line number warning, which is not allowed. Do not include line number information when calling memory tools (line numbers are for display purposes only)." 
) current_value = str(agent_state.memory.get_block(label).value).expandtabs() - new_str = str(new_str).expandtabs() + new_string = str(new_string).expandtabs() current_value_lines = current_value.split("\n") n_lines = len(current_value_lines) @@ -438,11 +432,11 @@ def memory_insert(agent_state: "AgentState", label: str, new_str: str, insert_li ) # Insert the new string as a line - new_str_lines = new_str.split("\n") - new_value_lines = current_value_lines[:insert_line] + new_str_lines + current_value_lines[insert_line:] - snippet_lines = ( + new_string_lines = new_string.split("\n") + new_value_lines = current_value_lines[:insert_line] + new_string_lines + current_value_lines[insert_line:] + ( current_value_lines[max(0, insert_line - SNIPPET_LINES) : insert_line] - + new_str_lines + + new_string_lines + current_value_lines[insert_line : insert_line + SNIPPET_LINES] ) @@ -453,15 +447,7 @@ def memory_insert(agent_state: "AgentState", label: str, new_str: str, insert_li # Write into the block agent_state.memory.update_block_value(label=label, value=new_value) - # Prepare the success message - success_msg = ( - f"The core memory block with label `{label}` has been successfully edited. " - f"Your system prompt has been recompiled with the updated memory contents and is now active in your context. " - f"Review the changes and make sure they are as expected (correct indentation, " - f"no duplicate lines, etc). Edit the memory block again if necessary." - ) - - return success_msg + return new_value def memory_apply_patch(agent_state: "AgentState", label: str, patch: str) -> str: # type: ignore @@ -499,7 +485,7 @@ def memory_apply_patch(agent_state: "AgentState", label: str, patch: str) -> str raise NotImplementedError("This should never be invoked directly. 
Contact Letta if you see this error message.") -def memory_rethink(agent_state: "AgentState", label: str, new_memory: str) -> None: +def memory_rethink(agent_state: "AgentState", label: str, new_memory: str) -> str: """ The memory_rethink command allows you to completely rewrite the contents of a memory block. Use this tool to make large sweeping changes (e.g. when you want to condense or reorganize the memory blocks), do NOT use this tool to make small precise edits (e.g. add or remove a line, replace a specific string, etc). @@ -528,17 +514,7 @@ def memory_rethink(agent_state: "AgentState", label: str, new_memory: str) -> No agent_state.memory.set_block(new_block) agent_state.memory.update_block_value(label=label, value=new_memory) - - # Prepare the success message - success_msg = ( - f"The core memory block with label `{label}` has been successfully edited. " - f"Your system prompt has been recompiled with the updated memory contents and is now active in your context. " - f"Review the changes and make sure they are as expected (correct indentation, " - f"no duplicate lines, etc). Edit the memory block again if necessary." 
- ) - - # return None - return success_msg + return new_memory def memory_finish_edits(agent_state: "AgentState") -> None: # type: ignore diff --git a/letta/functions/function_sets/multi_agent.py b/letta/functions/function_sets/multi_agent.py index 100bf737..43c90109 100644 --- a/letta/functions/function_sets/multi_agent.py +++ b/letta/functions/function_sets/multi_agent.py @@ -1,12 +1,13 @@ import asyncio -import json -from concurrent.futures import ThreadPoolExecutor, as_completed from typing import TYPE_CHECKING, List +if TYPE_CHECKING: + from letta.agents.letta_agent import LettaAgent as Agent + from letta.functions.helpers import ( + _send_message_to_agents_matching_tags_async, _send_message_to_all_agents_in_group_async, execute_send_message_to_agent, - extract_send_message_from_steps_messages, fire_and_forget_send_to_agent, ) from letta.schemas.enums import MessageRole @@ -27,9 +28,7 @@ def send_message_to_agent_and_wait_for_reply(self: "Agent", message: str, other_ str: The response from the target agent. """ augmented_message = ( - f"[Incoming message from agent with ID '{self.agent_state.id}' - to reply to this message, " - f"make sure to use the 'send_message' at the end, and the system will notify the sender of your response] " - f"{message}" + f"[Incoming message from agent with ID '{self.agent_state.id}' - your response will be delivered to the sender] {message}" ) messages = [MessageCreate(role=MessageRole.system, content=augmented_message, name=self.agent_state.name)] @@ -56,57 +55,18 @@ def send_message_to_agents_matching_tags(self: "Agent", message: str, match_all: in the returned list. 
""" server = get_letta_server() - augmented_message = ( - f"[Incoming message from external Letta agent - to reply to this message, " - f"make sure to use the 'send_message' at the end, and the system will notify the sender of your response] " - f"{message}" - ) + augmented_message = f"[Incoming message from external Letta agent - your response will be delivered to the sender] {message}" # Find matching agents matching_agents = server.agent_manager.list_agents_matching_tags(actor=self.user, match_all=match_all, match_some=match_some) if not matching_agents: return [] - def process_agent(agent_id: str) -> str: - """Loads an agent, formats the message, and executes .step()""" - actor = self.user # Ensure correct actor context - agent = server.load_agent(agent_id=agent_id, interface=None, actor=actor) + # Prepare the message + messages = [MessageCreate(role=MessageRole.system, content=augmented_message, name=self.agent_state.name)] - # Prepare the message - messages = [MessageCreate(role=MessageRole.system, content=augmented_message, name=self.agent_state.name)] - - # Run .step() and return the response - usage_stats = agent.step( - input_messages=messages, - chaining=True, - max_chaining_steps=None, - stream=False, - skip_verify=True, - metadata=None, - put_inner_thoughts_first=True, - ) - - send_messages = extract_send_message_from_steps_messages(usage_stats.steps_messages, logger=agent.logger) - response_data = { - "agent_id": agent_id, - "response_messages": send_messages if send_messages else [""], - } - - return json.dumps(response_data, indent=2) - - # Use ThreadPoolExecutor for parallel execution - results = [] - with ThreadPoolExecutor(max_workers=settings.multi_agent_concurrent_sends) as executor: - future_to_agent = {executor.submit(process_agent, agent_state.id): agent_state for agent_state in matching_agents} - - for future in as_completed(future_to_agent): - try: - results.append(future.result()) # Collect results - except Exception as e: - # Log or 
handle failure for specific agents if needed - self.logger.exception(f"Error processing agent {future_to_agent[future]}: {e}") - - return results + # Use async helper for parallel message sending + return asyncio.run(_send_message_to_agents_matching_tags_async(self, server, messages, matching_agents)) def send_message_to_all_agents_in_group(self: "Agent", message: str) -> List[str]: @@ -138,8 +98,8 @@ def send_message_to_agent_async(self: "Agent", message: str, other_agent_id: str raise RuntimeError("This tool is not allowed to be run on Letta Cloud.") message = ( - f"[Incoming message from agent with ID '{self.agent_state.id}' - to reply to this message, " - f"make sure to use the 'send_message_to_agent_async' tool, or the agent will not receive your message] " + f"[Incoming message from agent with ID '{self.agent_state.id}' - " + f"this is a one-way notification; if you need to respond, use an agent-to-agent messaging tool if available] " f"{message}" ) messages = [MessageCreate(role=MessageRole.system, content=message, name=self.agent_state.name)] diff --git a/letta/functions/function_sets/voice.py b/letta/functions/function_sets/voice.py index dbe16993..c46188a2 100644 --- a/letta/functions/function_sets/voice.py +++ b/letta/functions/function_sets/voice.py @@ -1,5 +1,8 @@ ## Voice chat + sleeptime tools -from typing import List, Optional +from typing import TYPE_CHECKING, List, Optional + +if TYPE_CHECKING: + from letta.schemas.agent import AgentState from pydantic import BaseModel, Field diff --git a/letta/functions/functions.py b/letta/functions/functions.py index c35a48c6..01c7d4e2 100644 --- a/letta/functions/functions.py +++ b/letta/functions/functions.py @@ -179,7 +179,7 @@ def _extract_pydantic_classes(tree: ast.AST, imports_map: Dict[str, Any]) -> Dic pass # Field is required, no default else: field_kwargs["default"] = default_val - except: + except Exception: pass fields[field_name] = Field(**field_kwargs) @@ -188,7 +188,7 @@ def 
_extract_pydantic_classes(tree: ast.AST, imports_map: Dict[str, Any]) -> Dic try: default_val = ast.literal_eval(stmt.value) fields[field_name] = default_val - except: + except Exception: pass # Create the dynamic Pydantic model diff --git a/letta/functions/helpers.py b/letta/functions/helpers.py index 0e11e4b8..ce023a4b 100644 --- a/letta/functions/helpers.py +++ b/letta/functions/helpers.py @@ -3,7 +3,17 @@ import json import logging import threading from random import uniform -from typing import Any, Dict, List, Optional, Type, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union + +if TYPE_CHECKING: + from letta.agents.letta_agent import LettaAgent as Agent + from letta.schemas.agent import AgentState + from letta.server.server import SyncServer + + try: + from langchain.tools.base import BaseTool as LangChainBaseTool + except ImportError: + LangChainBaseTool = None import humps from pydantic import BaseModel, Field, create_model @@ -21,6 +31,8 @@ from letta.server.rest_api.dependencies import get_letta_server from letta.settings import settings from letta.utils import safe_create_task +_background_tasks: set[asyncio.Task] = set() + # TODO needed? 
def generate_mcp_tool_wrapper(mcp_tool_name: str) -> tuple[str, str]: @@ -36,7 +48,8 @@ def {mcp_tool_name}(**kwargs): def generate_langchain_tool_wrapper( - tool: "LangChainBaseTool", additional_imports_module_attr_map: dict[str, str] = None + tool: "LangChainBaseTool", + additional_imports_module_attr_map: dict[str, str] | None = None, ) -> tuple[str, str]: tool_name = tool.__class__.__name__ import_statement = f"from langchain_community.tools import {tool_name}" @@ -428,15 +441,18 @@ def fire_and_forget_send_to_agent( # 4) Try to schedule the coroutine in an existing loop, else spawn a thread try: loop = asyncio.get_running_loop() - # If we get here, a loop is running; schedule the coroutine in background - loop.create_task(background_task()) + task = loop.create_task(background_task()) + _background_tasks.add(task) + task.add_done_callback(_background_tasks.discard) except RuntimeError: - # Means no event loop is running in this thread run_in_background_thread(background_task()) async def _send_message_to_agents_matching_tags_async( - sender_agent: "Agent", server: "SyncServer", messages: List[MessageCreate], matching_agents: List["AgentState"] + sender_agent: "Agent", + server: "SyncServer", + messages: List[MessageCreate], + matching_agents: List["AgentState"], ) -> List[str]: async def _send_single(agent_state): return await _async_send_message_with_retries( @@ -464,9 +480,7 @@ async def _send_message_to_all_agents_in_group_async(sender_agent: "Agent", mess server = get_letta_server() augmented_message = ( - f"[Incoming message from agent with ID '{sender_agent.agent_state.id}' - to reply to this message, " - f"make sure to use the 'send_message' at the end, and the system will notify the sender of your response] " - f"{message}" + f"[Incoming message from agent with ID '{sender_agent.agent_state.id}' - your response will be delivered to the sender] {message}" ) worker_agents_ids = sender_agent.agent_state.multi_agent_group.agent_ids @@ -520,7 +534,9 @@ def 
generate_model_from_args_json_schema(schema: Dict[str, Any]) -> Type[BaseMod return _create_model_from_schema(schema.get("title", "DynamicModel"), schema, nested_models) -def _create_model_from_schema(name: str, model_schema: Dict[str, Any], nested_models: Dict[str, Type[BaseModel]] = None) -> Type[BaseModel]: +def _create_model_from_schema( + name: str, model_schema: Dict[str, Any], nested_models: Dict[str, Type[BaseModel]] | None = None +) -> Type[BaseModel]: fields = {} for field_name, field_schema in model_schema["properties"].items(): field_type = _get_field_type(field_schema, nested_models) @@ -531,7 +547,7 @@ def _create_model_from_schema(name: str, model_schema: Dict[str, Any], nested_mo return create_model(name, **fields) -def _get_field_type(field_schema: Dict[str, Any], nested_models: Dict[str, Type[BaseModel]] = None) -> Any: +def _get_field_type(field_schema: Dict[str, Any], nested_models: Dict[str, Type[BaseModel]] | None = None) -> Any: """Helper to convert JSON schema types to Python types.""" if field_schema.get("type") == "string": return str diff --git a/letta/functions/mcp_client/types.py b/letta/functions/mcp_client/types.py index 74293d81..e5c11a9a 100644 --- a/letta/functions/mcp_client/types.py +++ b/letta/functions/mcp_client/types.py @@ -98,6 +98,32 @@ class BaseServerConfig(BaseModel): return result + @staticmethod + def _sanitize_dict_key(key: str) -> str: + """Strip surrounding quotes and trailing colons from a dict key.""" + key = key.strip() + for quote in ('"', "'"): + if key.startswith(quote) and key.endswith(quote): + key = key[1:-1] + break + key = key.rstrip(":") + return key.strip() + + @staticmethod + def _sanitize_dict_value(value: str) -> str: + """Strip surrounding quotes from a dict value.""" + value = value.strip() + for quote in ('"', "'"): + if value.startswith(quote) and value.endswith(quote): + value = value[1:-1] + break + return value + + @classmethod + def _sanitize_dict(cls, d: Dict[str, str]) -> Dict[str, str]: + 
"""Sanitize a string dict by stripping quotes from keys and values.""" + return {cls._sanitize_dict_key(k): cls._sanitize_dict_value(v) for k, v in d.items()} + def resolve_custom_headers( self, custom_headers: Optional[Dict[str, str]], environment_variables: Optional[Dict[str, str]] = None ) -> Optional[Dict[str, str]]: @@ -114,6 +140,8 @@ class BaseServerConfig(BaseModel): if custom_headers is None: return None + custom_headers = self._sanitize_dict(custom_headers) + resolved_headers = {} for key, value in custom_headers.items(): # Resolve templated variables in each header value @@ -164,8 +192,12 @@ class HTTPBasedServerConfig(BaseServerConfig): return None def resolve_environment_variables(self, environment_variables: Optional[Dict[str, str]] = None) -> None: - if self.auth_token and super().is_templated_tool_variable(self.auth_token): - self.auth_token = super().get_tool_variable(self.auth_token, environment_variables) + if self.auth_header: + self.auth_header = self._sanitize_dict_key(self.auth_header) + if self.auth_token: + self.auth_token = self._sanitize_dict_value(self.auth_token) + if super().is_templated_tool_variable(self.auth_token): + self.auth_token = super().get_tool_variable(self.auth_token, environment_variables) self.custom_headers = super().resolve_custom_headers(self.custom_headers, environment_variables) @@ -176,11 +208,11 @@ class HTTPBasedServerConfig(BaseServerConfig): Returns: Dictionary of headers or None if no headers are configured """ - if self.custom_headers is not None or (self.auth_header is not None and self.auth_token is not None): + if self.custom_headers is not None or (self.auth_header and self.auth_token): headers = self.custom_headers.copy() if self.custom_headers else {} - # Add auth header if specified - if self.auth_header is not None and self.auth_token is not None: + # Add auth header if specified (skip if either is empty to avoid illegal header values) + if self.auth_header and self.auth_token: 
headers[self.auth_header] = self.auth_token return headers diff --git a/letta/functions/schema_generator.py b/letta/functions/schema_generator.py index 3f549069..79ab66a9 100644 --- a/letta/functions/schema_generator.py +++ b/letta/functions/schema_generator.py @@ -96,7 +96,7 @@ def type_to_json_schema_type(py_type) -> dict: # Handle array types origin = get_origin(py_type) - if py_type == list or origin in (list, List): + if py_type is list or origin in (list, List): args = get_args(py_type) if len(args) == 0: # is this correct @@ -142,7 +142,7 @@ def type_to_json_schema_type(py_type) -> dict: } # Handle object types - if py_type == dict or origin in (dict, Dict): + if py_type is dict or origin in (dict, Dict): args = get_args(py_type) if not args: # Generic dict without type arguments @@ -704,8 +704,9 @@ def generate_tool_schema_for_mcp( name = mcp_tool.name description = mcp_tool.description - assert "type" in parameters_schema, parameters_schema - assert "properties" in parameters_schema, parameters_schema + if "type" not in parameters_schema: + parameters_schema["type"] = "object" + parameters_schema.setdefault("properties", {}) # assert "required" in parameters_schema, parameters_schema # Normalize the schema to fix common issues with MCP schemas diff --git a/letta/functions/schema_validator.py b/letta/functions/schema_validator.py index dd99fd04..82c921c2 100644 --- a/letta/functions/schema_validator.py +++ b/letta/functions/schema_validator.py @@ -56,7 +56,7 @@ def validate_complete_json_schema(schema: Dict[str, Any]) -> Tuple[SchemaHealth, """ if obj_schema.get("type") != "object": return False - props = obj_schema.get("properties", {}) + obj_schema.get("properties", {}) required = obj_schema.get("required", []) additional = obj_schema.get("additionalProperties", True) diff --git a/letta/groups/dynamic_multi_agent.py b/letta/groups/dynamic_multi_agent.py index 4f0a09ab..1e3368ff 100644 --- a/letta/groups/dynamic_multi_agent.py +++ 
b/letta/groups/dynamic_multi_agent.py @@ -1,4 +1,7 @@ -from typing import List, Optional +from typing import TYPE_CHECKING, List, Optional + +if TYPE_CHECKING: + from letta.agents.letta_agent import LettaAgent as Agent from letta.agents.base_agent import BaseAgent from letta.agents.letta_agent import LettaAgent @@ -92,7 +95,7 @@ class DynamicMultiAgent(BaseAgent): # Parse manager response responses = Message.to_letta_messages_from_list(manager_agent.last_response_messages) - assistant_message = [response for response in responses if response.message_type == "assistant_message"][0] + assistant_message = next(response for response in responses if response.message_type == "assistant_message") for name, agent_id in [(agents[agent_id].agent_state.name, agent_id) for agent_id in agent_id_options]: if name.lower() in assistant_message.content.lower(): speaker_id = agent_id diff --git a/letta/groups/helpers.py b/letta/groups/helpers.py index 1b8ec916..386a6a3f 100644 --- a/letta/groups/helpers.py +++ b/letta/groups/helpers.py @@ -98,7 +98,7 @@ def stringify_message(message: Message, use_assistant_name: bool = False) -> str elif isinstance(content, ImageContent): messages.append(f"{message.name or 'user'}: [Image Here]") return "\n".join(messages) - except: + except Exception: if message.content and len(message.content) > 0: return f"{message.name or 'user'}: {message.content[0].text}" return None diff --git a/letta/groups/sleeptime_multi_agent_v2.py b/letta/groups/sleeptime_multi_agent_v2.py index 563600b7..65b33632 100644 --- a/letta/groups/sleeptime_multi_agent_v2.py +++ b/letta/groups/sleeptime_multi_agent_v2.py @@ -1,4 +1,3 @@ -import asyncio from collections.abc import AsyncGenerator from datetime import datetime, timezone @@ -213,7 +212,7 @@ class SleeptimeMultiAgentV2(BaseAgent): group_id=self.group.id, last_processed_message_id=last_response_messages[-1].id, actor=self.actor ) for sleeptime_agent_id in self.group.agent_ids: - run_id = await 
self._issue_background_task( + await self._issue_background_task( sleeptime_agent_id, last_response_messages, last_processed_message_id, diff --git a/letta/groups/sleeptime_multi_agent_v3.py b/letta/groups/sleeptime_multi_agent_v3.py index 6c4ea830..d1c8c302 100644 --- a/letta/groups/sleeptime_multi_agent_v3.py +++ b/letta/groups/sleeptime_multi_agent_v3.py @@ -9,7 +9,6 @@ from letta.otel.tracing import trace_method from letta.schemas.agent import AgentState from letta.schemas.enums import RunStatus from letta.schemas.group import Group, ManagerType -from letta.schemas.job import JobUpdate from letta.schemas.letta_message import MessageType from letta.schemas.letta_message_content import TextContent from letta.schemas.letta_request import ClientToolSchema @@ -47,6 +46,7 @@ class SleeptimeMultiAgentV3(LettaAgentV2): include_return_message_types: list[MessageType] | None = None, request_start_timestamp_ns: int | None = None, client_tools: list[ClientToolSchema] | None = None, + include_compaction_messages: bool = False, ) -> LettaResponse: self.run_ids = [] @@ -61,6 +61,7 @@ class SleeptimeMultiAgentV3(LettaAgentV2): include_return_message_types=include_return_message_types, request_start_timestamp_ns=request_start_timestamp_ns, client_tools=client_tools, + include_compaction_messages=include_compaction_messages, ) await self.run_sleeptime_agents() @@ -79,6 +80,7 @@ class SleeptimeMultiAgentV3(LettaAgentV2): request_start_timestamp_ns: int | None = None, include_return_message_types: list[MessageType] | None = None, client_tools: list[ClientToolSchema] | None = None, + include_compaction_messages: bool = False, ) -> AsyncGenerator[str, None]: self.run_ids = [] @@ -96,6 +98,7 @@ class SleeptimeMultiAgentV3(LettaAgentV2): include_return_message_types=include_return_message_types, request_start_timestamp_ns=request_start_timestamp_ns, client_tools=client_tools, + include_compaction_messages=include_compaction_messages, ): yield chunk finally: diff --git 
a/letta/groups/sleeptime_multi_agent_v4.py b/letta/groups/sleeptime_multi_agent_v4.py index 85e7ca08..9995ee15 100644 --- a/letta/groups/sleeptime_multi_agent_v4.py +++ b/letta/groups/sleeptime_multi_agent_v4.py @@ -1,4 +1,3 @@ -import asyncio from collections.abc import AsyncGenerator from datetime import datetime, timezone @@ -7,9 +6,8 @@ from letta.constants import DEFAULT_MAX_STEPS from letta.groups.helpers import stringify_message from letta.otel.tracing import trace_method from letta.schemas.agent import AgentState -from letta.schemas.enums import JobStatus, RunStatus +from letta.schemas.enums import RunStatus from letta.schemas.group import Group, ManagerType -from letta.schemas.job import JobUpdate from letta.schemas.letta_message import MessageType from letta.schemas.letta_message_content import TextContent from letta.schemas.letta_request import ClientToolSchema @@ -48,6 +46,7 @@ class SleeptimeMultiAgentV4(LettaAgentV3): request_start_timestamp_ns: int | None = None, conversation_id: str | None = None, client_tools: list[ClientToolSchema] | None = None, + include_compaction_messages: bool = False, ) -> LettaResponse: self.run_ids = [] @@ -63,6 +62,7 @@ class SleeptimeMultiAgentV4(LettaAgentV3): request_start_timestamp_ns=request_start_timestamp_ns, conversation_id=conversation_id, client_tools=client_tools, + include_compaction_messages=include_compaction_messages, ) run_ids = await self.run_sleeptime_agents() @@ -81,6 +81,7 @@ class SleeptimeMultiAgentV4(LettaAgentV3): include_return_message_types: list[MessageType] | None = None, conversation_id: str | None = None, client_tools: list[ClientToolSchema] | None = None, + include_compaction_messages: bool = False, ) -> AsyncGenerator[str, None]: self.run_ids = [] @@ -99,6 +100,7 @@ class SleeptimeMultiAgentV4(LettaAgentV3): request_start_timestamp_ns=request_start_timestamp_ns, conversation_id=conversation_id, client_tools=client_tools, + include_compaction_messages=include_compaction_messages, ): yield 
chunk finally: diff --git a/letta/groups/supervisor_multi_agent.py b/letta/groups/supervisor_multi_agent.py index 1a87aa67..d95afde6 100644 --- a/letta/groups/supervisor_multi_agent.py +++ b/letta/groups/supervisor_multi_agent.py @@ -1,19 +1,9 @@ -from typing import List, Optional +from typing import List from letta.agents.base_agent import BaseAgent -from letta.constants import DEFAULT_MESSAGE_TOOL -from letta.functions.function_sets.multi_agent import send_message_to_all_agents_in_group -from letta.functions.functions import parse_source_code -from letta.functions.schema_generator import generate_schema from letta.interface import AgentInterface from letta.orm import User from letta.schemas.agent import AgentState -from letta.schemas.enums import ToolType -from letta.schemas.letta_message_content import TextContent -from letta.schemas.message import MessageCreate -from letta.schemas.tool import Tool -from letta.schemas.tool_rule import ChildToolRule, InitToolRule, TerminalToolRule -from letta.schemas.usage import LettaUsageStatistics from letta.services.agent_manager import AgentManager from letta.services.tool_manager import ToolManager diff --git a/letta/helpers/__init__.py b/letta/helpers/__init__.py index 62e8d709..9c9cd242 100644 --- a/letta/helpers/__init__.py +++ b/letta/helpers/__init__.py @@ -1 +1 @@ -from letta.helpers.tool_rule_solver import ToolRulesSolver +from letta.helpers.tool_rule_solver import ToolRulesSolver as ToolRulesSolver diff --git a/letta/helpers/converters.py b/letta/helpers/converters.py index 24cc2ab1..257498bd 100644 --- a/letta/helpers/converters.py +++ b/letta/helpers/converters.py @@ -1,4 +1,7 @@ -from typing import Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +if TYPE_CHECKING: + from letta.services.summarizer.summarizer_config import CompactionSettings import numpy as np from anthropic.types.beta.messages import BetaMessageBatch, BetaMessageBatchIndividualResponse diff 
--git a/letta/helpers/json_helpers.py b/letta/helpers/json_helpers.py index 27f8b735..35f48d0e 100644 --- a/letta/helpers/json_helpers.py +++ b/letta/helpers/json_helpers.py @@ -4,6 +4,53 @@ from datetime import datetime from typing import Any +def sanitize_unicode_surrogates(value: Any) -> Any: + """Recursively remove invalid Unicode surrogate characters from strings. + + Unicode surrogate pairs (U+D800 to U+DFFF) are used internally by UTF-16 encoding + but are invalid as standalone characters in UTF-8. When present, they cause + UnicodeEncodeError when encoding to UTF-8, breaking API requests that need to + serialize data to JSON. + + This function sanitizes: + - Strings: removes unpaired surrogates that can't be encoded to UTF-8 + - Dicts: recursively sanitizes all string values + - Lists: recursively sanitizes all elements + - Other types: returned as-is + + Args: + value: The value to sanitize + + Returns: + The sanitized value with surrogate characters removed from all strings + """ + if isinstance(value, str): + # Remove lone surrogate characters (U+D800 to U+DFFF) which are invalid in UTF-8 + # Using character filtering is more reliable than encode/decode for edge cases + try: + # Filter out any character in the surrogate range + return "".join(char for char in value if not (0xD800 <= ord(char) <= 0xDFFF)) + except Exception: + # Fallback: try encode with errors="replace" which replaces surrogates with � + try: + return value.encode("utf-8", errors="replace").decode("utf-8") + except Exception: + # Last resort: return original (should never reach here) + return value + elif isinstance(value, dict): + # Recursively sanitize dictionary keys and values + return {sanitize_unicode_surrogates(k): sanitize_unicode_surrogates(v) for k, v in value.items()} + elif isinstance(value, list): + # Recursively sanitize list elements + return [sanitize_unicode_surrogates(item) for item in value] + elif isinstance(value, tuple): + # Recursively sanitize tuple elements 
(return as tuple) + return tuple(sanitize_unicode_surrogates(item) for item in value) + else: + # Return other types as-is (int, float, bool, None, etc.) + return value + + def sanitize_null_bytes(value: Any) -> Any: """Recursively remove null bytes (0x00) from strings. diff --git a/letta/helpers/message_helper.py b/letta/helpers/message_helper.py index f4e142df..84d8cd0b 100644 --- a/letta/helpers/message_helper.py +++ b/letta/helpers/message_helper.py @@ -139,6 +139,20 @@ async def _convert_message_create_to_message( image_media_type, _ = mimetypes.guess_type(file_path) if not image_media_type: image_media_type = "image/jpeg" # default fallback + elif url.startswith("data:"): + # Handle data: URLs (inline base64 encoded images) + # Format: data:[][;base64], + try: + # Split header from data + header, image_data = url.split(",", 1) + # Extract media type from header (e.g., "data:image/jpeg;base64") + header_parts = header.split(";") + image_media_type = header_parts[0].replace("data:", "") or "image/jpeg" + # Data is already base64 encoded, set directly and continue + content.source = Base64Image(media_type=image_media_type, data=image_data) + continue # Skip the common conversion path below + except ValueError: + raise LettaImageFetchError(url=url[:100] + "...", reason="Invalid data URL format") else: # Handle http(s):// URLs using async httpx image_bytes, image_media_type = await _fetch_image_from_url(url) diff --git a/letta/helpers/pinecone_utils.py b/letta/helpers/pinecone_utils.py index d85727e5..409d7ddc 100644 --- a/letta/helpers/pinecone_utils.py +++ b/letta/helpers/pinecone_utils.py @@ -306,7 +306,9 @@ async def search_pinecone_index(query: str, limit: int, filter: Dict[str, Any], @pinecone_retry() @trace_method -async def list_pinecone_index_for_files(file_id: str, actor: User, limit: int = None, pagination_token: str = None) -> List[str]: +async def list_pinecone_index_for_files( + file_id: str, actor: User, limit: int | None = None, pagination_token: 
str | None = None +) -> List[str]: if not PINECONE_AVAILABLE: raise ImportError("Pinecone is not available. Please install pinecone to use this feature.") diff --git a/letta/helpers/tool_execution_helper.py b/letta/helpers/tool_execution_helper.py index 1058e2f3..3d6a7fa1 100644 --- a/letta/helpers/tool_execution_helper.py +++ b/letta/helpers/tool_execution_helper.py @@ -1,6 +1,6 @@ import copy from collections import OrderedDict -from typing import Any, Dict, List, Optional +from typing import Any, Dict, Optional from letta.constants import PRE_EXECUTION_MESSAGE_ARG from letta.schemas.tool import MCP_TOOL_METADATA_SCHEMA_STATUS, MCP_TOOL_METADATA_SCHEMA_WARNINGS @@ -201,7 +201,7 @@ def add_pre_execution_message(tool_schema: Dict[str, Any], description: Optional # Ensure pre-execution message is the first required field if PRE_EXECUTION_MESSAGE_ARG not in required: - required = [PRE_EXECUTION_MESSAGE_ARG] + required + required = [PRE_EXECUTION_MESSAGE_ARG, *required] # Update the schema with ordered properties and required list schema["parameters"] = { diff --git a/letta/helpers/tpuf_client.py b/letta/helpers/tpuf_client.py index 169b9969..4976080a 100644 --- a/letta/helpers/tpuf_client.py +++ b/letta/helpers/tpuf_client.py @@ -3,12 +3,20 @@ import asyncio import json import logging +import random from datetime import datetime, timezone -from typing import Any, Callable, List, Optional, Tuple +from functools import wraps +from typing import TYPE_CHECKING, Any, Callable, List, Optional, Tuple, TypeVar + +if TYPE_CHECKING: + from letta.schemas.tool import Tool as PydanticTool + from letta.schemas.user import User as PydanticUser + +import httpx from letta.constants import DEFAULT_EMBEDDING_CHUNK_SIZE from letta.errors import LettaInvalidArgumentError -from letta.otel.tracing import trace_method +from letta.otel.tracing import log_event, trace_method from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import MessageRole, TagMatchMode 
from letta.schemas.passage import Passage as PydanticPassage @@ -16,6 +24,136 @@ from letta.settings import model_settings, settings logger = logging.getLogger(__name__) +# Type variable for generic async retry decorator +T = TypeVar("T") + +# Default retry configuration for turbopuffer operations +TPUF_MAX_RETRIES = 3 +TPUF_INITIAL_DELAY = 1.0 # seconds +TPUF_EXPONENTIAL_BASE = 2.0 +TPUF_JITTER = True + + +def is_transient_error(error: Exception) -> bool: + """Check if an error is transient and should be retried. + + Args: + error: The exception to check + + Returns: + True if the error is transient and can be retried + """ + # httpx connection errors (network issues, DNS failures, etc.) + if isinstance(error, httpx.ConnectError): + return True + + # httpx timeout errors + if isinstance(error, httpx.TimeoutException): + return True + + # httpx network errors + if isinstance(error, httpx.NetworkError): + return True + + # Check for connection-related errors in the error message + error_str = str(error).lower() + transient_patterns = [ + "connect call failed", + "connection refused", + "connection reset", + "connection timed out", + "temporary failure", + "name resolution", + "dns", + "network unreachable", + "no route to host", + "ssl handshake", + ] + for pattern in transient_patterns: + if pattern in error_str: + return True + + return False + + +def async_retry_with_backoff( + max_retries: int = TPUF_MAX_RETRIES, + initial_delay: float = TPUF_INITIAL_DELAY, + exponential_base: float = TPUF_EXPONENTIAL_BASE, + jitter: bool = TPUF_JITTER, +): + """Decorator for async functions that retries on transient errors with exponential backoff. 
+ + Args: + max_retries: Maximum number of retry attempts + initial_delay: Initial delay between retries in seconds + exponential_base: Base for exponential backoff calculation + jitter: Whether to add random jitter to delays + + Returns: + Decorated async function with retry logic + """ + + def decorator(func: Callable[..., Any]) -> Callable[..., Any]: + @wraps(func) + async def wrapper(*args, **kwargs) -> Any: + num_retries = 0 + delay = initial_delay + + while True: + try: + return await func(*args, **kwargs) + except Exception as e: + # Check if this is a retryable error + if not is_transient_error(e): + # Not a transient error, re-raise immediately + raise + + num_retries += 1 + + # Log the retry attempt + log_event( + "turbopuffer_retry_attempt", + { + "attempt": num_retries, + "delay": delay, + "error_type": type(e).__name__, + "error": str(e), + "function": func.__name__, + }, + ) + logger.warning( + f"Turbopuffer operation '{func.__name__}' failed with transient error " + f"(attempt {num_retries}/{max_retries}): {e}. Retrying in {delay:.1f}s..." 
+ ) + + # Check if max retries exceeded + if num_retries > max_retries: + log_event( + "turbopuffer_max_retries_exceeded", + { + "max_retries": max_retries, + "error_type": type(e).__name__, + "error": str(e), + "function": func.__name__, + }, + ) + logger.error(f"Turbopuffer operation '{func.__name__}' failed after {max_retries} retries: {e}") + raise + + # Wait with exponential backoff + await asyncio.sleep(delay) + + # Calculate next delay with optional jitter + delay *= exponential_base + if jitter: + delay *= 1 + random.random() * 0.1 # Add up to 10% jitter + + return wrapper + + return decorator + + # Global semaphore for Turbopuffer operations to prevent overwhelming the service # This is separate from embedding semaphore since Turbopuffer can handle more concurrency _GLOBAL_TURBOPUFFER_SEMAPHORE = asyncio.Semaphore(5) @@ -25,11 +163,11 @@ def _run_turbopuffer_write_in_thread( api_key: str, region: str, namespace_name: str, - upsert_columns: dict = None, - deletes: list = None, - delete_by_filter: tuple = None, + upsert_columns: dict | None = None, + deletes: list | None = None, + delete_by_filter: tuple | None = None, distance_metric: str = "cosine_distance", - schema: dict = None, + schema: dict | None = None, ): """ Sync wrapper to run turbopuffer write in isolated event loop. 
@@ -93,7 +231,7 @@ class TurbopufferClient: embedding_chunk_size=DEFAULT_EMBEDDING_CHUNK_SIZE, ) - def __init__(self, api_key: str = None, region: str = None): + def __init__(self, api_key: str | None = None, region: str | None = None): """Initialize Turbopuffer client.""" self.api_key = api_key or settings.tpuf_api_key self.region = region or settings.tpuf_region @@ -222,6 +360,7 @@ class TurbopufferClient: return json.dumps(parts) @trace_method + @async_retry_with_backoff() async def insert_tools( self, tools: List["PydanticTool"], @@ -238,7 +377,6 @@ class TurbopufferClient: Returns: True if successful """ - from turbopuffer import AsyncTurbopuffer if not tools: return True @@ -313,6 +451,7 @@ class TurbopufferClient: raise @trace_method + @async_retry_with_backoff() async def insert_archival_memories( self, archive_id: str, @@ -339,7 +478,6 @@ class TurbopufferClient: Returns: List of PydanticPassage objects that were inserted """ - from turbopuffer import AsyncTurbopuffer # filter out empty text chunks filtered_chunks = [(i, text) for i, text in enumerate(text_chunks) if text.strip()] @@ -464,6 +602,7 @@ class TurbopufferClient: raise @trace_method + @async_retry_with_backoff() async def insert_messages( self, agent_id: str, @@ -494,7 +633,6 @@ class TurbopufferClient: Returns: True if successful """ - from turbopuffer import AsyncTurbopuffer # filter out empty message texts filtered_messages = [(i, text) for i, text in enumerate(message_texts) if text.strip()] @@ -609,6 +747,7 @@ class TurbopufferClient: raise @trace_method + @async_retry_with_backoff() async def _execute_query( self, namespace_name: str, @@ -1377,9 +1516,9 @@ class TurbopufferClient: return sorted_results[:top_k] @trace_method + @async_retry_with_backoff() async def delete_passage(self, archive_id: str, passage_id: str) -> bool: """Delete a passage from Turbopuffer.""" - from turbopuffer import AsyncTurbopuffer namespace_name = await self._get_archive_namespace_name(archive_id) @@ -1399,9 
+1538,9 @@ class TurbopufferClient: raise @trace_method + @async_retry_with_backoff() async def delete_passages(self, archive_id: str, passage_ids: List[str]) -> bool: """Delete multiple passages from Turbopuffer.""" - from turbopuffer import AsyncTurbopuffer if not passage_ids: return True @@ -1424,6 +1563,7 @@ class TurbopufferClient: raise @trace_method + @async_retry_with_backoff() async def delete_all_passages(self, archive_id: str) -> bool: """Delete all passages for an archive from Turbopuffer.""" from turbopuffer import AsyncTurbopuffer @@ -1442,9 +1582,9 @@ class TurbopufferClient: raise @trace_method + @async_retry_with_backoff() async def delete_messages(self, agent_id: str, organization_id: str, message_ids: List[str]) -> bool: """Delete multiple messages from Turbopuffer.""" - from turbopuffer import AsyncTurbopuffer if not message_ids: return True @@ -1467,9 +1607,9 @@ class TurbopufferClient: raise @trace_method + @async_retry_with_backoff() async def delete_all_messages(self, agent_id: str, organization_id: str) -> bool: """Delete all messages for an agent from Turbopuffer.""" - from turbopuffer import AsyncTurbopuffer namespace_name = await self._get_message_namespace_name(organization_id) @@ -1509,6 +1649,7 @@ class TurbopufferClient: return namespace_name @trace_method + @async_retry_with_backoff() async def insert_file_passages( self, source_id: str, @@ -1531,7 +1672,6 @@ class TurbopufferClient: Returns: List of PydanticPassage objects that were inserted """ - from turbopuffer import AsyncTurbopuffer if not text_chunks: return [] @@ -1765,9 +1905,9 @@ class TurbopufferClient: return passages_with_scores @trace_method + @async_retry_with_backoff() async def delete_file_passages(self, source_id: str, file_id: str, organization_id: str) -> bool: """Delete all passages for a specific file from Turbopuffer.""" - from turbopuffer import AsyncTurbopuffer namespace_name = await self._get_file_passages_namespace_name(organization_id) @@ -1793,9 +1933,9 
@@ class TurbopufferClient: raise @trace_method + @async_retry_with_backoff() async def delete_source_passages(self, source_id: str, organization_id: str) -> bool: """Delete all passages for a source from Turbopuffer.""" - from turbopuffer import AsyncTurbopuffer namespace_name = await self._get_file_passages_namespace_name(organization_id) @@ -1817,6 +1957,7 @@ class TurbopufferClient: # tool methods @trace_method + @async_retry_with_backoff() async def delete_tools(self, organization_id: str, tool_ids: List[str]) -> bool: """Delete tools from Turbopuffer. @@ -1827,7 +1968,6 @@ class TurbopufferClient: Returns: True if successful """ - from turbopuffer import AsyncTurbopuffer if not tool_ids: return True diff --git a/letta/interface.py b/letta/interface.py index 7e146b07..a290d893 100644 --- a/letta/interface.py +++ b/letta/interface.py @@ -136,7 +136,7 @@ class CLIInterface(AgentInterface): else: try: msg_json = json_loads(msg) - except: + except Exception: printd(f"{CLI_WARNING_PREFIX}failed to parse user message into json") printd_user_message("🧑", msg) return diff --git a/letta/interfaces/anthropic_parallel_tool_call_streaming_interface.py b/letta/interfaces/anthropic_parallel_tool_call_streaming_interface.py index 0c13a727..bde3d666 100644 --- a/letta/interfaces/anthropic_parallel_tool_call_streaming_interface.py +++ b/letta/interfaces/anthropic_parallel_tool_call_streaming_interface.py @@ -3,7 +3,12 @@ import json from collections.abc import AsyncGenerator from datetime import datetime, timezone from enum import Enum -from typing import Optional +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from opentelemetry.trace import Span + + from letta.schemas.usage import LettaUsageStatistics from anthropic import AsyncStream from anthropic.types.beta import ( @@ -274,7 +279,13 @@ class SimpleAnthropicStreamingInterface: attributes={"stop_reason": StopReasonType.error.value, "error": str(e), "stacktrace": traceback.format_exc()}, ) yield 
LettaStopReason(stop_reason=StopReasonType.error) - raise e + + # Transform Anthropic errors into our custom error types for consistent handling + from letta.llm_api.anthropic_client import AnthropicClient + + client = AnthropicClient() + transformed_error = client.handle_llm_error(e) + raise transformed_error finally: logger.info("AnthropicStreamingInterface: Stream processing complete.") @@ -316,6 +327,7 @@ class SimpleAnthropicStreamingInterface: id=decrement_message_uuid(self.letta_message_id), # Do not emit placeholder arguments here to avoid UI duplicates tool_call=ToolCallDelta(name=name, tool_call_id=call_id), + tool_calls=ToolCallDelta(name=name, tool_call_id=call_id), date=datetime.now(timezone.utc).isoformat(), otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1), run_id=self.run_id, @@ -421,6 +433,7 @@ class SimpleAnthropicStreamingInterface: tool_call_msg = ApprovalRequestMessage( id=decrement_message_uuid(self.letta_message_id), tool_call=ToolCallDelta(name=name, tool_call_id=call_id, arguments=delta.partial_json), + tool_calls=ToolCallDelta(name=name, tool_call_id=call_id, arguments=delta.partial_json), date=datetime.now(timezone.utc).isoformat(), otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1), run_id=self.run_id, diff --git a/letta/interfaces/anthropic_streaming_interface.py b/letta/interfaces/anthropic_streaming_interface.py index fa1fdefa..dcb8d4e7 100644 --- a/letta/interfaces/anthropic_streaming_interface.py +++ b/letta/interfaces/anthropic_streaming_interface.py @@ -3,7 +3,12 @@ import json from collections.abc import AsyncGenerator from datetime import datetime, timezone from enum import Enum -from typing import Optional +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from opentelemetry.trace import Span + + from letta.schemas.usage import LettaUsageStatistics from anthropic import AsyncStream from anthropic.types.beta import ( @@ -116,7 +121,7 @@ class 
AnthropicStreamingInterface: # Attempt to use OptimisticJSONParser to handle incomplete/malformed JSON try: tool_input = self.json_parser.parse(args_str) - except: + except Exception: logger.warning( f"Failed to decode tool call arguments for tool_call_id={self.tool_call_id}, " f"name={self.tool_call_name}. Raw input: {args_str!r}. Error: {e}" @@ -263,7 +268,13 @@ class AnthropicStreamingInterface: attributes={"stop_reason": StopReasonType.error.value, "error": str(e), "stacktrace": traceback.format_exc()}, ) yield LettaStopReason(stop_reason=StopReasonType.error) - raise e + + # Transform Anthropic errors into our custom error types for consistent handling + from letta.llm_api.anthropic_client import AnthropicClient + + client = AnthropicClient() + transformed_error = client.handle_llm_error(e) + raise transformed_error finally: logger.info("AnthropicStreamingInterface: Stream processing complete.") @@ -424,16 +435,19 @@ class AnthropicStreamingInterface: if current_inner_thoughts: tool_call_args = tool_call_args.replace(f'"{INNER_THOUGHTS_KWARG}": "{current_inner_thoughts}"', "") + tool_call_delta = ToolCallDelta( + name=self.tool_call_name, + tool_call_id=self.tool_call_id, + arguments=tool_call_args, + ) + approval_msg = ApprovalRequestMessage( id=self.letta_message_id, otid=Message.generate_otid_from_id(self.letta_message_id, message_index), date=datetime.now(timezone.utc).isoformat(), name=self.tool_call_name, - tool_call=ToolCallDelta( - name=self.tool_call_name, - tool_call_id=self.tool_call_id, - arguments=tool_call_args, - ), + tool_call=tool_call_delta, + tool_calls=tool_call_delta, run_id=self.run_id, step_id=self.step_id, ) @@ -493,6 +507,9 @@ class AnthropicStreamingInterface: tool_call_msg = ApprovalRequestMessage( id=self.letta_message_id, tool_call=ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id, arguments=delta.partial_json), + tool_calls=ToolCallDelta( + name=self.tool_call_name, tool_call_id=self.tool_call_id, 
arguments=delta.partial_json + ), date=datetime.now(timezone.utc).isoformat(), run_id=self.run_id, step_id=self.step_id, @@ -576,9 +593,15 @@ class AnthropicStreamingInterface: pass elif isinstance(event, BetaRawContentBlockStopEvent): # If we're exiting a tool use block and there are still buffered messages, - # we should flush them now + # we should flush them now. + # Ensure each flushed chunk has an otid before yielding. if self.anthropic_mode == EventMode.TOOL_USE and self.tool_call_buffer: for buffered_msg in self.tool_call_buffer: + if not buffered_msg.otid: + if prev_message_type and prev_message_type != buffered_msg.message_type: + message_index += 1 + buffered_msg.otid = Message.generate_otid_from_id(buffered_msg.id, message_index) + prev_message_type = buffered_msg.message_type yield buffered_msg self.tool_call_buffer = [] @@ -644,7 +667,7 @@ class SimpleAnthropicStreamingInterface: # Attempt to use OptimisticJSONParser to handle incomplete/malformed JSON try: tool_input = self.json_parser.parse(args_str) - except: + except Exception: logger.warning( f"Failed to decode tool call arguments for tool_call_id={self.tool_call_id}, " f"name={self.tool_call_name}. Raw input: {args_str!r}. 
Error: {e}" @@ -827,6 +850,7 @@ class SimpleAnthropicStreamingInterface: tool_call_msg = ApprovalRequestMessage( id=self.letta_message_id, tool_call=ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id), + tool_calls=ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id), date=datetime.now(timezone.utc).isoformat(), otid=Message.generate_otid_from_id(self.letta_message_id, message_index), run_id=self.run_id, @@ -911,6 +935,7 @@ class SimpleAnthropicStreamingInterface: tool_call_msg = ApprovalRequestMessage( id=self.letta_message_id, tool_call=ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id, arguments=delta.partial_json), + tool_calls=ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id, arguments=delta.partial_json), date=datetime.now(timezone.utc).isoformat(), otid=Message.generate_otid_from_id(self.letta_message_id, message_index), run_id=self.run_id, diff --git a/letta/interfaces/gemini_streaming_interface.py b/letta/interfaces/gemini_streaming_interface.py index 9656977c..21c8ddf3 100644 --- a/letta/interfaces/gemini_streaming_interface.py +++ b/letta/interfaces/gemini_streaming_interface.py @@ -3,7 +3,12 @@ import base64 import json from collections.abc import AsyncGenerator from datetime import datetime, timezone -from typing import AsyncIterator, List, Optional +from typing import TYPE_CHECKING, AsyncIterator, List, Optional + +if TYPE_CHECKING: + from opentelemetry.trace import Span + + from letta.schemas.usage import LettaUsageStatistics from google.genai.types import ( GenerateContentResponse, @@ -97,9 +102,11 @@ class SimpleGeminiStreamingInterface: def get_content(self) -> List[ReasoningContent | TextContent | ToolCallContent]: """This is (unusually) in chunked format, instead of merged""" + has_reasoning = any(isinstance(c, ReasoningContent) for c in self.content_parts) for content in self.content_parts: if isinstance(content, ReasoningContent): - # This assumes there is only 
one signature per turn + content.signature = self.thinking_signature + elif isinstance(content, TextContent) and not has_reasoning and self.thinking_signature: content.signature = self.thinking_signature return self.content_parts @@ -322,15 +329,18 @@ class SimpleGeminiStreamingInterface: self.collected_tool_calls.append(ToolCall(id=call_id, function=FunctionCall(name=name, arguments=arguments_str))) if self.tool_call_name and self.tool_call_name in self.requires_approval_tools: + tool_call_delta = ToolCallDelta( + name=name, + arguments=arguments_str, + tool_call_id=call_id, + ) + yield ApprovalRequestMessage( id=decrement_message_uuid(self.letta_message_id), otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1), date=datetime.now(timezone.utc), - tool_call=ToolCallDelta( - name=name, - arguments=arguments_str, - tool_call_id=call_id, - ), + tool_call=tool_call_delta, + tool_calls=tool_call_delta, run_id=self.run_id, step_id=self.step_id, ) diff --git a/letta/interfaces/openai_streaming_interface.py b/letta/interfaces/openai_streaming_interface.py index ca3602df..966fe408 100644 --- a/letta/interfaces/openai_streaming_interface.py +++ b/letta/interfaces/openai_streaming_interface.py @@ -1,7 +1,12 @@ import asyncio from collections.abc import AsyncGenerator from datetime import datetime, timezone -from typing import Optional +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from opentelemetry.trace import Span + + from letta.schemas.usage import LettaUsageStatistics from openai import AsyncStream from openai.types.chat.chat_completion_chunk import ChatCompletionChunk @@ -14,6 +19,7 @@ from openai.types.responses import ( ResponseFunctionCallArgumentsDeltaEvent, ResponseFunctionCallArgumentsDoneEvent, ResponseFunctionToolCall, + ResponseIncompleteEvent, ResponseInProgressEvent, ResponseOutputItemAddedEvent, ResponseOutputItemDoneEvent, @@ -314,9 +320,6 @@ class OpenAIStreamingInterface: # Track events for diagnostics 
self.total_events_received += 1 self.last_event_type = "ChatCompletionChunk" - # Track events for diagnostics - self.total_events_received += 1 - self.last_event_type = "ChatCompletionChunk" if not self.model or not self.message_id: self.model = chunk.model @@ -414,25 +417,22 @@ class OpenAIStreamingInterface: if prev_message_type and prev_message_type != "tool_call_message": message_index += 1 self.tool_call_name = str(self._get_function_name_buffer()) + tool_call_delta = ToolCallDelta( + name=self._get_function_name_buffer(), + arguments=None, + tool_call_id=self._get_current_function_id(), + ) if self.tool_call_name in self.requires_approval_tools: tool_call_msg = ApprovalRequestMessage( id=decrement_message_uuid(self.letta_message_id), date=datetime.now(timezone.utc), - tool_call=ToolCallDelta( - name=self._get_function_name_buffer(), - arguments=None, - tool_call_id=self._get_current_function_id(), - ), + tool_call=tool_call_delta, + tool_calls=tool_call_delta, otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1), run_id=self.run_id, step_id=self.step_id, ) else: - tool_call_delta = ToolCallDelta( - name=self._get_function_name_buffer(), - arguments=None, - tool_call_id=self._get_current_function_id(), - ) tool_call_msg = ToolCallMessage( id=self.letta_message_id, date=datetime.now(timezone.utc), @@ -471,7 +471,7 @@ class OpenAIStreamingInterface: # Minimal, robust extraction: only emit the value of "message". # If we buffered a prefix while name was streaming, feed it first. 
if self._function_args_buffer_parts: - payload = "".join(self._function_args_buffer_parts + [tool_call.function.arguments]) + payload = "".join([*self._function_args_buffer_parts, tool_call.function.arguments]) self._function_args_buffer_parts = None else: payload = tool_call.function.arguments @@ -498,29 +498,26 @@ class OpenAIStreamingInterface: # if the previous chunk had arguments but we needed to flush name if self._function_args_buffer_parts: # In this case, we should release the buffer + new data at once - combined_chunk = "".join(self._function_args_buffer_parts + [updates_main_json]) + combined_chunk = "".join([*self._function_args_buffer_parts, updates_main_json]) if prev_message_type and prev_message_type != "tool_call_message": message_index += 1 + tool_call_delta = ToolCallDelta( + name=self._get_function_name_buffer(), + arguments=combined_chunk, + tool_call_id=self._get_current_function_id(), + ) if self._get_function_name_buffer() in self.requires_approval_tools: tool_call_msg = ApprovalRequestMessage( id=decrement_message_uuid(self.letta_message_id), date=datetime.now(timezone.utc), - tool_call=ToolCallDelta( - name=self._get_function_name_buffer(), - arguments=combined_chunk, - tool_call_id=self._get_current_function_id(), - ), + tool_call=tool_call_delta, + tool_calls=tool_call_delta, # name=name, otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1), run_id=self.run_id, step_id=self.step_id, ) else: - tool_call_delta = ToolCallDelta( - name=self._get_function_name_buffer(), - arguments=combined_chunk, - tool_call_id=self._get_current_function_id(), - ) tool_call_msg = ToolCallMessage( id=self.letta_message_id, date=datetime.now(timezone.utc), @@ -540,26 +537,23 @@ class OpenAIStreamingInterface: # If there's no buffer to clear, just output a new chunk with new data if prev_message_type and prev_message_type != "tool_call_message": message_index += 1 + tool_call_delta = ToolCallDelta( + name=None, + 
arguments=updates_main_json, + tool_call_id=self._get_current_function_id(), + ) if self._get_function_name_buffer() in self.requires_approval_tools: tool_call_msg = ApprovalRequestMessage( id=decrement_message_uuid(self.letta_message_id), date=datetime.now(timezone.utc), - tool_call=ToolCallDelta( - name=None, - arguments=updates_main_json, - tool_call_id=self._get_current_function_id(), - ), + tool_call=tool_call_delta, + tool_calls=tool_call_delta, # name=name, otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1), run_id=self.run_id, step_id=self.step_id, ) else: - tool_call_delta = ToolCallDelta( - name=None, - arguments=updates_main_json, - tool_call_id=self._get_current_function_id(), - ) tool_call_msg = ToolCallMessage( id=self.letta_message_id, date=datetime.now(timezone.utc), @@ -588,7 +582,7 @@ class SimpleOpenAIStreamingInterface: messages: Optional[list] = None, tools: Optional[list] = None, requires_approval_tools: list = [], - model: str = None, + model: str | None = None, run_id: str | None = None, step_id: str | None = None, cancellation_event: Optional["asyncio.Event"] = None, @@ -639,7 +633,6 @@ class SimpleOpenAIStreamingInterface: def get_content(self) -> list[TextContent | OmittedReasoningContent | ReasoningContent]: shown_omitted = False - concat_content = "" merged_messages = [] reasoning_content = [] concat_content_parts: list[str] = [] @@ -837,6 +830,10 @@ class SimpleOpenAIStreamingInterface: prev_message_type: Optional[str] = None, message_index: int = 0, ) -> AsyncGenerator[LettaMessage | LettaStopReason, None]: + # Track events for diagnostics + self.total_events_received += 1 + self.last_event_type = "ChatCompletionChunk" + if not self.model or not self.message_id: self.model = chunk.model self.message_id = chunk.id @@ -887,14 +884,10 @@ class SimpleOpenAIStreamingInterface: prev_message_type = assistant_msg.message_type yield assistant_msg - if ( - hasattr(chunk, "choices") - and len(chunk.choices) > 0 
- and hasattr(chunk.choices[0], "delta") - and hasattr(chunk.choices[0].delta, "reasoning_content") - ): + if hasattr(chunk, "choices") and len(chunk.choices) > 0 and hasattr(chunk.choices[0], "delta"): delta = chunk.choices[0].delta - reasoning_content = getattr(delta, "reasoning_content", None) + # Check for reasoning_content (standard) or reasoning (OpenRouter) + reasoning_content = getattr(delta, "reasoning_content", None) or getattr(delta, "reasoning", None) if reasoning_content is not None and reasoning_content != "": if prev_message_type and prev_message_type != "reasoning_message": message_index += 1 @@ -945,7 +938,7 @@ class SimpleOpenAIStreamingInterface: if resolved_id is None: continue - delta = ToolCallDelta( + tool_call_delta = ToolCallDelta( name=tool_call.function.name if (tool_call.function and tool_call.function.name) else None, arguments=tool_call.function.arguments if (tool_call.function and tool_call.function.arguments) else None, tool_call_id=resolved_id, @@ -956,7 +949,8 @@ class SimpleOpenAIStreamingInterface: tool_call_msg = ApprovalRequestMessage( id=decrement_message_uuid(self.letta_message_id), date=datetime.now(timezone.utc), - tool_call=delta, + tool_call=tool_call_delta, + tool_calls=tool_call_delta, otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1), run_id=self.run_id, step_id=self.step_id, @@ -967,8 +961,8 @@ class SimpleOpenAIStreamingInterface: tool_call_msg = ToolCallMessage( id=self.letta_message_id, date=datetime.now(timezone.utc), - tool_call=delta, - tool_calls=delta, + tool_call=tool_call_delta, + tool_calls=tool_call_delta, otid=Message.generate_otid_from_id(self.letta_message_id, message_index), run_id=self.run_id, step_id=self.step_id, @@ -988,7 +982,7 @@ class SimpleOpenAIResponsesStreamingInterface: messages: Optional[list] = None, tools: Optional[list] = None, requires_approval_tools: list = [], - model: str = None, + model: str | None = None, run_id: str | None = None, step_id: str 
| None = None, cancellation_event: Optional["asyncio.Event"] = None, @@ -1029,6 +1023,9 @@ class SimpleOpenAIResponsesStreamingInterface: self.last_event_type: str | None = None self.total_events_received: int = 0 self.stream_was_cancelled: bool = False + # For downstream finish_reason mapping (e.g. max_output_tokens -> "length") + # None means no incomplete reason was observed. + self.incomplete_reason: str | None = None # -------- Mapping helpers (no broad try/except) -------- def _record_tool_mapping(self, event: object, item: object) -> tuple[str | None, str | None, int | None, str | None]: @@ -1089,6 +1086,10 @@ class SimpleOpenAIResponsesStreamingInterface: text=response.content[0].text, ) ) + elif len(response.content) == 0: + # Incomplete responses may have an output message with no content parts + # (model started the message item but hit max_output_tokens before producing text) + logger.warning("ResponseOutputMessage has 0 content parts (likely from an incomplete response), skipping.") else: raise ValueError(f"Got {len(response.content)} content parts, expected 1") @@ -1254,8 +1255,6 @@ class SimpleOpenAIResponsesStreamingInterface: if isinstance(new_event_item, ResponseReasoningItem): # Look for summary delta, or encrypted_content summary = new_event_item.summary - content = new_event_item.content # NOTE: always none - encrypted_content = new_event_item.encrypted_content # TODO change to summarize reasoning message, but we need to figure out the streaming indices of summary problem concat_summary = "".join([s.text for s in summary]) if concat_summary != "": @@ -1283,27 +1282,24 @@ class SimpleOpenAIResponsesStreamingInterface: self.tool_call_name = name # Record mapping so subsequent argument deltas can be associated self._record_tool_mapping(event, new_event_item) + tool_call_delta = ToolCallDelta( + name=name, + arguments=arguments if arguments != "" else None, + tool_call_id=call_id, + ) if self.tool_call_name and self.tool_call_name in 
self.requires_approval_tools: yield ApprovalRequestMessage( id=decrement_message_uuid(self.letta_message_id), otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1), date=datetime.now(timezone.utc), - tool_call=ToolCallDelta( - name=name, - arguments=arguments if arguments != "" else None, - tool_call_id=call_id, - ), + tool_call=tool_call_delta, + tool_calls=tool_call_delta, run_id=self.run_id, step_id=self.step_id, ) else: if prev_message_type and prev_message_type != "tool_call_message": message_index += 1 - tool_call_delta = ToolCallDelta( - name=name, - arguments=arguments if arguments != "" else None, - tool_call_id=call_id, - ) yield ToolCallMessage( id=self.letta_message_id, otid=Message.generate_otid_from_id(self.letta_message_id, message_index), @@ -1394,7 +1390,6 @@ class SimpleOpenAIResponsesStreamingInterface: # NOTE: is this inclusive of the deltas? # If not, we should add it to the rolling summary_index = event.summary_index - text = event.text return # Reasoning summary streaming @@ -1436,7 +1431,6 @@ class SimpleOpenAIResponsesStreamingInterface: # Assistant message streaming elif isinstance(event, ResponseTextDoneEvent): # NOTE: inclusive, can skip - text = event.text return # Assistant message done @@ -1451,7 +1445,7 @@ class SimpleOpenAIResponsesStreamingInterface: delta = event.delta # Resolve tool_call_id/name using output_index or item_id - resolved_call_id, resolved_name, out_idx, item_id = self._resolve_mapping_for_delta(event) + resolved_call_id, resolved_name, _out_idx, _item_id = self._resolve_mapping_for_delta(event) # Fallback to last seen tool name for approval routing if mapping name missing if not resolved_name: @@ -1462,27 +1456,24 @@ class SimpleOpenAIResponsesStreamingInterface: return # We have a call id; emit approval or tool-call message accordingly + tool_call_delta = ToolCallDelta( + name=None, + arguments=delta, + tool_call_id=resolved_call_id, + ) if resolved_name and resolved_name in 
self.requires_approval_tools: yield ApprovalRequestMessage( id=decrement_message_uuid(self.letta_message_id), otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1), date=datetime.now(timezone.utc), - tool_call=ToolCallDelta( - name=None, - arguments=delta, - tool_call_id=resolved_call_id, - ), + tool_call=tool_call_delta, + tool_calls=tool_call_delta, run_id=self.run_id, step_id=self.step_id, ) else: if prev_message_type and prev_message_type != "tool_call_message": message_index += 1 - tool_call_delta = ToolCallDelta( - name=None, - arguments=delta, - tool_call_id=resolved_call_id, - ) yield ToolCallMessage( id=self.letta_message_id, otid=Message.generate_otid_from_id(self.letta_message_id, message_index), @@ -1497,7 +1488,6 @@ class SimpleOpenAIResponsesStreamingInterface: # Function calls elif isinstance(event, ResponseFunctionCallArgumentsDoneEvent): # NOTE: inclusive - full_args = event.arguments return # Generic @@ -1506,31 +1496,55 @@ class SimpleOpenAIResponsesStreamingInterface: return # Generic finish - elif isinstance(event, ResponseCompletedEvent): - # NOTE we can "rebuild" the final state of the stream using the values in here, instead of relying on the accumulators + elif isinstance(event, (ResponseCompletedEvent, ResponseIncompleteEvent)): + # ResponseIncompleteEvent has the same response structure as ResponseCompletedEvent, + # but indicates the response was cut short (e.g. due to max_output_tokens). + # We still extract the partial response and usage data so they aren't silently lost. + if isinstance(event, ResponseIncompleteEvent): + self.incomplete_reason = ( + getattr(event.response.incomplete_details, "reason", None) if event.response.incomplete_details else None + ) + reason = self.incomplete_reason or "unknown" + logger.warning( + f"OpenAI Responses API returned an incomplete response (reason: {reason}). 
" + f"Model: {event.response.model}, output_tokens: {event.response.usage.output_tokens if event.response.usage else 'N/A'}. " + f"The partial response content will still be used." + ) + self.final_response = event.response self.model = event.response.model - self.input_tokens = event.response.usage.input_tokens - self.output_tokens = event.response.usage.output_tokens self.message_id = event.response.id - # Store raw usage for transparent provider trace logging - try: - self.raw_usage = event.response.usage.model_dump(exclude_none=True) - except Exception as e: - logger.error(f"Failed to capture raw_usage from OpenAI Responses API: {e}") - self.raw_usage = None - # Capture cache token details (Responses API uses input_tokens_details) - # Use `is not None` to capture 0 values (meaning "provider reported 0 cached tokens") - if hasattr(event.response.usage, "input_tokens_details") and event.response.usage.input_tokens_details: - details = event.response.usage.input_tokens_details - if hasattr(details, "cached_tokens") and details.cached_tokens is not None: - self.cached_tokens = details.cached_tokens - # Capture reasoning token details (Responses API uses output_tokens_details) - # Use `is not None` to capture 0 values (meaning "provider reported 0 reasoning tokens") - if hasattr(event.response.usage, "output_tokens_details") and event.response.usage.output_tokens_details: - details = event.response.usage.output_tokens_details - if hasattr(details, "reasoning_tokens") and details.reasoning_tokens is not None: - self.reasoning_tokens = details.reasoning_tokens + + usage = event.response.usage + if usage is not None: + self.input_tokens = usage.input_tokens + self.output_tokens = usage.output_tokens + + # Store raw usage for transparent provider trace logging + try: + self.raw_usage = usage.model_dump(exclude_none=True) + except Exception as e: + logger.error(f"Failed to capture raw_usage from OpenAI Responses API: {e}") + self.raw_usage = None + + # Capture cache 
token details (Responses API uses input_tokens_details) + # Use `is not None` to capture 0 values (meaning "provider reported 0 cached tokens") + if hasattr(usage, "input_tokens_details") and usage.input_tokens_details: + details = usage.input_tokens_details + if hasattr(details, "cached_tokens") and details.cached_tokens is not None: + self.cached_tokens = details.cached_tokens + + # Capture reasoning token details (Responses API uses output_tokens_details) + # Use `is not None` to capture 0 values (meaning "provider reported 0 reasoning tokens") + if hasattr(usage, "output_tokens_details") and usage.output_tokens_details: + details = usage.output_tokens_details + if hasattr(details, "reasoning_tokens") and details.reasoning_tokens is not None: + self.reasoning_tokens = details.reasoning_tokens + else: + logger.warning( + "OpenAI Responses API finish event had no usage payload. " + "Proceeding with partial response but token metrics may be incomplete." + ) return else: diff --git a/letta/jobs/scheduler.py b/letta/jobs/scheduler.py index 8b1bac10..3ee4d136 100644 --- a/letta/jobs/scheduler.py +++ b/letta/jobs/scheduler.py @@ -94,7 +94,7 @@ async def _try_acquire_lock_and_start_scheduler(server: SyncServer) -> bool: if scheduler.running: try: scheduler.shutdown(wait=False) - except: + except Exception: pass return False finally: diff --git a/letta/llm_api/anthropic_client.py b/letta/llm_api/anthropic_client.py index 8acd65dd..08b06f2c 100644 --- a/letta/llm_api/anthropic_client.py +++ b/letta/llm_api/anthropic_client.py @@ -19,6 +19,7 @@ from letta.errors import ( LLMAuthenticationError, LLMBadRequestError, LLMConnectionError, + LLMInsufficientCreditsError, LLMNotFoundError, LLMPermissionDeniedError, LLMProviderOverloaded, @@ -29,13 +30,16 @@ from letta.errors import ( ) from letta.helpers.datetime_helpers import get_utc_time_int from letta.helpers.decorators import deprecated +from letta.helpers.json_helpers import sanitize_unicode_surrogates from 
letta.llm_api.anthropic_constants import ANTHROPIC_MAX_STRICT_TOOLS, ANTHROPIC_STRICT_MODE_ALLOWLIST +from letta.llm_api.error_utils import is_insufficient_credits_message from letta.llm_api.helpers import add_inner_thoughts_to_functions, unpack_all_inner_thoughts_from_kwargs from letta.llm_api.llm_client_base import LLMClientBase from letta.local_llm.constants import INNER_THOUGHTS_KWARG, INNER_THOUGHTS_KWARG_DESCRIPTION from letta.log import get_logger from letta.otel.tracing import trace_method from letta.schemas.agent import AgentType +from letta.schemas.enums import ProviderCategory from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message as PydanticMessage from letta.schemas.openai.chat_completion_request import Tool as OpenAITool @@ -45,9 +49,7 @@ from letta.schemas.openai.chat_completion_response import ( FunctionCall, Message as ChoiceMessage, ToolCall, - UsageStatistics, ) -from letta.schemas.response_format import JsonSchemaResponseFormat from letta.schemas.usage import LettaUsageStatistics from letta.settings import model_settings @@ -62,10 +64,16 @@ class AnthropicClient(LLMClientBase): def request(self, request_data: dict, llm_config: LLMConfig) -> dict: client = self._get_anthropic_client(llm_config, async_client=False) betas: list[str] = [] - # Interleaved thinking for reasoner (sync path parity) + + # Opus 4.6 / Sonnet 4.6 Auto Thinking if llm_config.enable_reasoner: - betas.append("interleaved-thinking-2025-05-14") - # 1M context beta for Sonnet 4/4.5 when enabled + if llm_config.model.startswith("claude-opus-4-6") or llm_config.model.startswith("claude-sonnet-4-6"): + betas.append("adaptive-thinking-2026-01-28") + # Interleaved thinking for other reasoners (sync path parity) + else: + betas.append("interleaved-thinking-2025-05-14") + + # 1M context beta for Sonnet 4/4.5 or Opus 4.6 when enabled try: from letta.settings import model_settings @@ -73,12 +81,23 @@ class AnthropicClient(LLMClientBase): 
llm_config.model.startswith("claude-sonnet-4") or llm_config.model.startswith("claude-sonnet-4-5") ): betas.append("context-1m-2025-08-07") + elif model_settings.anthropic_opus_1m and llm_config.model.startswith("claude-opus-4-6"): + betas.append("context-1m-2025-08-07") except Exception: pass - # Opus 4.5 effort parameter - to extend to other models, modify the model check - if llm_config.model.startswith("claude-opus-4-5") and llm_config.effort is not None: + # Effort parameter for Opus 4.5, Opus 4.6, and Sonnet 4.6 - to extend to other models, modify the model check + if ( + llm_config.model.startswith("claude-opus-4-5") + or llm_config.model.startswith("claude-opus-4-6") + or llm_config.model.startswith("claude-sonnet-4-6") + ) and llm_config.effort is not None: betas.append("effort-2025-11-24") + # Max effort beta for Opus 4.6 / Sonnet 4.6 + if ( + llm_config.model.startswith("claude-opus-4-6") or llm_config.model.startswith("claude-sonnet-4-6") + ) and llm_config.effort == "max": + betas.append("max-effort-2026-01-24") # Context management for Opus 4.5 to preserve thinking blocks (improves cache hits) if llm_config.model.startswith("claude-opus-4-5") and llm_config.enable_reasoner: @@ -88,21 +107,46 @@ class AnthropicClient(LLMClientBase): if llm_config.strict and _supports_structured_outputs(llm_config.model): betas.append("structured-outputs-2025-11-13") - if betas: - response = client.beta.messages.create(**request_data, betas=betas) - else: - response = client.beta.messages.create(**request_data) - return response.model_dump() + try: + if betas: + response = client.beta.messages.create(**request_data, betas=betas) + else: + response = client.beta.messages.create(**request_data) + return response.model_dump() + except ValueError as e: + # Anthropic SDK raises ValueError when streaming is required for long-running operations + # See: https://github.com/anthropics/anthropic-sdk-python#streaming + if "streaming is required" in str(e).lower(): + 
logger.warning( + "[Anthropic] Non-streaming request rejected due to potential long duration. Error: %s. " + "Note: Synchronous fallback to streaming is not supported. Use async API instead.", + str(e), + ) + # Re-raise as LLMBadRequestError (maps to 502 Bad Gateway) since this is a downstream provider constraint + raise LLMBadRequestError( + message="This operation may take longer than 10 minutes and requires streaming. " + "Please use the async API (request_async) instead of the deprecated sync API. " + f"Original error: {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) from e + raise @trace_method async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict: + request_data = sanitize_unicode_surrogates(request_data) + client = await self._get_anthropic_client_async(llm_config, async_client=True) betas: list[str] = [] - # interleaved thinking for reasoner - if llm_config.enable_reasoner: - betas.append("interleaved-thinking-2025-05-14") - # 1M context beta for Sonnet 4/4.5 when enabled + # Opus 4.6 / Sonnet 4.6 Auto Thinking + if llm_config.enable_reasoner: + if llm_config.model.startswith("claude-opus-4-6") or llm_config.model.startswith("claude-sonnet-4-6"): + betas.append("adaptive-thinking-2026-01-28") + # Interleaved thinking for other reasoners (sync path parity) + else: + betas.append("interleaved-thinking-2025-05-14") + + # 1M context beta for Sonnet 4/4.5 or Opus 4.6 when enabled try: from letta.settings import model_settings @@ -110,12 +154,23 @@ class AnthropicClient(LLMClientBase): llm_config.model.startswith("claude-sonnet-4") or llm_config.model.startswith("claude-sonnet-4-5") ): betas.append("context-1m-2025-08-07") + elif model_settings.anthropic_opus_1m and llm_config.model.startswith("claude-opus-4-6"): + betas.append("context-1m-2025-08-07") except Exception: pass - # Opus 4.5 effort parameter - to extend to other models, modify the model check - if llm_config.model.startswith("claude-opus-4-5") and llm_config.effort is 
not None: + # Effort parameter for Opus 4.5, Opus 4.6, and Sonnet 4.6 - to extend to other models, modify the model check + if ( + llm_config.model.startswith("claude-opus-4-5") + or llm_config.model.startswith("claude-opus-4-6") + or llm_config.model.startswith("claude-sonnet-4-6") + ) and llm_config.effort is not None: betas.append("effort-2025-11-24") + # Max effort beta for Opus 4.6 / Sonnet 4.6 + if ( + llm_config.model.startswith("claude-opus-4-6") or llm_config.model.startswith("claude-sonnet-4-6") + ) and llm_config.effort == "max": + betas.append("max-effort-2026-01-24") # Context management for Opus 4.5 to preserve thinking blocks (improves cache hits) if llm_config.model.startswith("claude-opus-4-5") and llm_config.enable_reasoner: @@ -254,6 +309,8 @@ class AnthropicClient(LLMClientBase): @trace_method async def stream_async(self, request_data: dict, llm_config: LLMConfig) -> AsyncStream[BetaRawMessageStreamEvent]: + request_data = sanitize_unicode_surrogates(request_data) + client = await self._get_anthropic_client_async(llm_config, async_client=True) request_data["stream"] = True @@ -262,12 +319,15 @@ class AnthropicClient(LLMClientBase): # See: https://docs.anthropic.com/en/docs/build-with-claude/tool-use/fine-grained-streaming betas = ["fine-grained-tool-streaming-2025-05-14"] - # If extended thinking, turn on interleaved header - # https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#interleaved-thinking + # Opus 4.6 / Sonnet 4.6 Auto Thinking if llm_config.enable_reasoner: - betas.append("interleaved-thinking-2025-05-14") + if llm_config.model.startswith("claude-opus-4-6") or llm_config.model.startswith("claude-sonnet-4-6"): + betas.append("adaptive-thinking-2026-01-28") + # Interleaved thinking for other reasoners (sync path parity) + else: + betas.append("interleaved-thinking-2025-05-14") - # 1M context beta for Sonnet 4/4.5 when enabled + # 1M context beta for Sonnet 4/4.5 or Opus 4.6 when enabled try: from letta.settings 
import model_settings @@ -275,12 +335,23 @@ class AnthropicClient(LLMClientBase): llm_config.model.startswith("claude-sonnet-4") or llm_config.model.startswith("claude-sonnet-4-5") ): betas.append("context-1m-2025-08-07") + elif model_settings.anthropic_opus_1m and llm_config.model.startswith("claude-opus-4-6"): + betas.append("context-1m-2025-08-07") except Exception: pass - # Opus 4.5 effort parameter - to extend to other models, modify the model check - if llm_config.model.startswith("claude-opus-4-5") and llm_config.effort is not None: + # Effort parameter for Opus 4.5, Opus 4.6, and Sonnet 4.6 - to extend to other models, modify the model check + if ( + llm_config.model.startswith("claude-opus-4-5") + or llm_config.model.startswith("claude-opus-4-6") + or llm_config.model.startswith("claude-sonnet-4-6") + ) and llm_config.effort is not None: betas.append("effort-2025-11-24") + # Max effort beta for Opus 4.6 / Sonnet 4.6 + if ( + llm_config.model.startswith("claude-opus-4-6") or llm_config.model.startswith("claude-sonnet-4-6") + ) and llm_config.effort == "max": + betas.append("max-effort-2026-01-24") # Context management for Opus 4.5 to preserve thinking blocks (improves cache hits) if llm_config.model.startswith("claude-opus-4-5") and llm_config.enable_reasoner: @@ -335,7 +406,7 @@ class AnthropicClient(LLMClientBase): for agent_id in agent_messages_mapping } - client = await self._get_anthropic_client_async(list(agent_llm_config_mapping.values())[0], async_client=True) + client = await self._get_anthropic_client_async(next(iter(agent_llm_config_mapping.values())), async_client=True) anthropic_requests = [ Request(custom_id=agent_id, params=MessageCreateParamsNonStreaming(**params)) for agent_id, params in requests.items() @@ -461,25 +532,43 @@ class AnthropicClient(LLMClientBase): } # Extended Thinking - if self.is_reasoning_model(llm_config) and llm_config.enable_reasoner: - thinking_budget = max(llm_config.max_reasoning_tokens, 1024) - if thinking_budget 
!= llm_config.max_reasoning_tokens: - logger.warning( - f"Max reasoning tokens must be at least 1024 for Claude. Setting max_reasoning_tokens to 1024 for model {llm_config.model}." - ) - data["thinking"] = { - "type": "enabled", - "budget_tokens": thinking_budget, - } + # Note: Anthropic does not allow thinking when forcing tool use with split_thread_agent + should_enable_thinking = ( + self.is_reasoning_model(llm_config) + and llm_config.enable_reasoner + and not (agent_type == AgentType.split_thread_agent and force_tool_call is not None) + ) + + if should_enable_thinking: + # Opus 4.6 / Sonnet 4.6 uses Auto Thinking (no budget tokens) + if llm_config.model.startswith("claude-opus-4-6") or llm_config.model.startswith("claude-sonnet-4-6"): + data["thinking"] = { + "type": "adaptive", + } + else: + # Traditional extended thinking with budget tokens + thinking_budget = max(llm_config.max_reasoning_tokens, 1024) + if thinking_budget != llm_config.max_reasoning_tokens: + logger.warning( + f"Max reasoning tokens must be at least 1024 for Claude. Setting max_reasoning_tokens to 1024 for model {llm_config.model}." + ) + data["thinking"] = { + "type": "enabled", + "budget_tokens": thinking_budget, + } # `temperature` may only be set to 1 when thinking is enabled. 
Please consult our documentation at https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#important-considerations-when-using-extended-thinking' data["temperature"] = 1.0 # Silently disable prefix_fill for now prefix_fill = False - # Effort configuration for Opus 4.5 (controls token spending) + # Effort configuration for Opus 4.5, Opus 4.6, and Sonnet 4.6 (controls token spending) # To extend to other models, modify the model check - if llm_config.model.startswith("claude-opus-4-5") and llm_config.effort is not None: + if ( + llm_config.model.startswith("claude-opus-4-5") + or llm_config.model.startswith("claude-opus-4-6") + or llm_config.model.startswith("claude-sonnet-4-6") + ) and llm_config.effort is not None: data["output_config"] = {"effort": llm_config.effort} # Context management for Opus 4.5 to preserve thinking blocks and improve cache hits @@ -510,11 +599,16 @@ class AnthropicClient(LLMClientBase): # Special case for summarization path tools_for_request = None tool_choice = None - elif self.is_reasoning_model(llm_config) and llm_config.enable_reasoner or agent_type == AgentType.letta_v1_agent: + elif (self.is_reasoning_model(llm_config) and llm_config.enable_reasoner) or agent_type == AgentType.letta_v1_agent: # NOTE: reasoning models currently do not allow for `any` - # NOTE: react agents should always have auto on, since the precense/absense of tool calls controls chaining - tool_choice = {"type": "auto", "disable_parallel_tool_use": True} - tools_for_request = [OpenAITool(function=f) for f in tools] + # NOTE: react agents should always have at least auto on, since the precense/absense of tool calls controls chaining + if agent_type == AgentType.split_thread_agent and force_tool_call is not None: + tool_choice = {"type": "tool", "name": force_tool_call, "disable_parallel_tool_use": True} + # When forcing a specific tool, only include that tool + tools_for_request = [OpenAITool(function=f) for f in tools if f["name"] == force_tool_call] 
+ else: + tool_choice = {"type": "auto", "disable_parallel_tool_use": True} + tools_for_request = [OpenAITool(function=f) for f in tools] elif force_tool_call is not None: tool_choice = {"type": "tool", "name": force_tool_call, "disable_parallel_tool_use": True} tools_for_request = [OpenAITool(function=f) for f in tools if f["name"] == force_tool_call] @@ -691,7 +785,9 @@ class AnthropicClient(LLMClientBase): return data - async def count_tokens(self, messages: List[dict] = None, model: str = None, tools: List[OpenAITool] = None) -> int: + async def count_tokens( + self, messages: List[dict] | None = None, model: str | None = None, tools: List[OpenAITool] | None = None + ) -> int: logging.getLogger("httpx").setLevel(logging.WARNING) # Use the default client; token counting is lightweight and does not require BYOK overrides client = anthropic.AsyncAnthropic() @@ -811,6 +907,8 @@ class AnthropicClient(LLMClientBase): and (model.startswith("claude-sonnet-4") or model.startswith("claude-sonnet-4-5")) ): betas.append("context-1m-2025-08-07") + elif model and model_settings.anthropic_opus_1m and model.startswith("claude-opus-4-6"): + betas.append("context-1m-2025-08-07") except Exception: pass @@ -851,10 +949,16 @@ class AnthropicClient(LLMClientBase): or llm_config.model.startswith("claude-haiku-4-5") # Opus 4.5 support - to extend effort parameter to other models, modify this check or llm_config.model.startswith("claude-opus-4-5") + # Opus 4.6 support - uses Auto Thinking + or llm_config.model.startswith("claude-opus-4-6") + # Sonnet 4.6 support - same API as Opus 4.6 + or llm_config.model.startswith("claude-sonnet-4-6") ) @trace_method - def handle_llm_error(self, e: Exception) -> Exception: + def handle_llm_error(self, e: Exception, llm_config: Optional[LLMConfig] = None) -> Exception: + is_byok = (llm_config.provider_category == ProviderCategory.byok) if llm_config else None + # make sure to check for overflow errors, regardless of error type error_str = 
str(e).lower() if ( @@ -869,6 +973,7 @@ class AnthropicClient(LLMClientBase): logger.warning(f"[Anthropic] Context window exceeded: {str(e)}") return ContextWindowExceededError( message=f"Context window exceeded for Anthropic: {str(e)}", + details={"is_byok": is_byok}, ) if isinstance(e, anthropic.APITimeoutError): @@ -876,7 +981,7 @@ class AnthropicClient(LLMClientBase): return LLMTimeoutError( message=f"Request to Anthropic timed out: {str(e)}", code=ErrorCode.TIMEOUT, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) if isinstance(e, anthropic.APIConnectionError): @@ -884,7 +989,7 @@ class AnthropicClient(LLMClientBase): return LLMConnectionError( message=f"Failed to connect to Anthropic: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) # Handle httpx.RemoteProtocolError which can occur during streaming @@ -895,7 +1000,7 @@ class AnthropicClient(LLMClientBase): return LLMConnectionError( message=f"Connection error during Anthropic streaming: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) # Handle httpx network errors which can occur during streaming @@ -905,7 +1010,7 @@ class AnthropicClient(LLMClientBase): return LLMConnectionError( message=f"Network error during Anthropic streaming: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__, "is_byok": is_byok}, ) if isinstance(e, anthropic.RateLimitError): @@ -913,6 +1018,7 @@ class AnthropicClient(LLMClientBase): return 
LLMRateLimitError( message=f"Rate limited by Anthropic: {str(e)}", code=ErrorCode.RATE_LIMIT_EXCEEDED, + details={"is_byok": is_byok}, ) if isinstance(e, anthropic.BadRequestError): @@ -930,11 +1036,13 @@ class AnthropicClient(LLMClientBase): # 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'input length and `max_tokens` exceed context limit: 173298 + 32000 > 200000, decrease input length or `max_tokens` and try again'}} return ContextWindowExceededError( message=f"Bad request to Anthropic (context window exceeded): {str(e)}", + details={"is_byok": is_byok}, ) else: return LLMBadRequestError( message=f"Bad request to Anthropic: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) if isinstance(e, anthropic.AuthenticationError): @@ -942,6 +1050,7 @@ class AnthropicClient(LLMClientBase): return LLMAuthenticationError( message=f"Authentication failed with Anthropic: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) if isinstance(e, anthropic.PermissionDeniedError): @@ -949,6 +1058,7 @@ class AnthropicClient(LLMClientBase): return LLMPermissionDeniedError( message=f"Permission denied by Anthropic: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) if isinstance(e, anthropic.NotFoundError): @@ -956,6 +1066,7 @@ class AnthropicClient(LLMClientBase): return LLMNotFoundError( message=f"Resource not found in Anthropic: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) if isinstance(e, anthropic.UnprocessableEntityError): @@ -963,23 +1074,29 @@ class AnthropicClient(LLMClientBase): return LLMUnprocessableEntityError( message=f"Invalid request content for Anthropic: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) - if isinstance(e, anthropic.APIStatusError): - logger.warning(f"[Anthropic] API status error: {str(e)}") - # Handle 413 Request Entity Too Large - request payload exceeds 
size limits - if hasattr(e, "status_code") and e.status_code == 413: - logger.warning(f"[Anthropic] Request too large (413): {str(e)}") - return ContextWindowExceededError( - message=f"Request too large for Anthropic (413): {str(e)}", + if isinstance(e, anthropic.InternalServerError): + error_str = str(e).lower() + if "overflow" in error_str or "upstream connect error" in error_str: + logger.warning(f"[Anthropic] Upstream infrastructure error (transient): {str(e)}") + return LLMServerError( + message=f"Anthropic upstream infrastructure error (transient, may resolve on retry): {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={ + "status_code": e.status_code if hasattr(e, "status_code") else None, + "transient": True, + }, ) - if "overloaded" in str(e).lower(): + if "overloaded" in error_str: return LLMProviderOverloaded( message=f"Anthropic API is overloaded: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, ) + logger.warning(f"[Anthropic] Internal server error: {str(e)}") return LLMServerError( - message=f"Anthropic API error: {str(e)}", + message=f"Anthropic internal server error: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, details={ "status_code": e.status_code if hasattr(e, "status_code") else None, @@ -987,7 +1104,38 @@ class AnthropicClient(LLMClientBase): }, ) - return super().handle_llm_error(e) + if isinstance(e, anthropic.APIStatusError): + logger.warning(f"[Anthropic] API status error: {str(e)}") + if (hasattr(e, "status_code") and e.status_code == 402) or is_insufficient_credits_message(str(e)): + msg = str(e) + return LLMInsufficientCreditsError( + message=f"Insufficient credits (BYOK): {msg}" if is_byok else f"Insufficient credits: {msg}", + code=ErrorCode.PAYMENT_REQUIRED, + details={"status_code": getattr(e, "status_code", None), "is_byok": is_byok}, + ) + if hasattr(e, "status_code") and e.status_code == 413: + logger.warning(f"[Anthropic] Request too large (413): {str(e)}") + return ContextWindowExceededError( + message=f"Request 
too large for Anthropic (413): {str(e)}", + details={"is_byok": is_byok}, + ) + if "overloaded" in str(e).lower(): + return LLMProviderOverloaded( + message=f"Anthropic API is overloaded: {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, + ) + return LLMServerError( + message=f"Anthropic API error: {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={ + "status_code": e.status_code if hasattr(e, "status_code") else None, + "response": str(e.response) if hasattr(e, "response") else None, + "is_byok": is_byok, + }, + ) + + return super().handle_llm_error(e, llm_config=llm_config) def extract_usage_statistics(self, response_data: dict | None, llm_config: LLMConfig) -> LettaUsageStatistics: """Extract usage statistics from Anthropic response and return as LettaUsageStatistics.""" @@ -1027,6 +1175,11 @@ class AnthropicClient(LLMClientBase): input_messages: List[PydanticMessage], llm_config: LLMConfig, ) -> ChatCompletionResponse: + if isinstance(response_data, str): + raise LLMServerError( + message="Anthropic endpoint returned a raw string instead of a JSON object. This usually indicates the endpoint URL is incorrect or returned an error page.", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) """ Example response from Claude 3: response.json = { @@ -1096,7 +1249,7 @@ class AnthropicClient(LLMClientBase): args_json = json.loads(arguments) if not isinstance(args_json, dict): raise LLMServerError("Expected parseable json object for arguments") - except: + except Exception: arguments = str(tool_input["function"]["arguments"]) else: arguments = json.dumps(tool_input, indent=2) @@ -1117,7 +1270,23 @@ class AnthropicClient(LLMClientBase): redacted_reasoning_content = content_part.data else: - raise RuntimeError("Unexpected empty content in response") + # Log the full response for debugging + logger.error( + "[Anthropic] Received response with empty content. 
Response ID: %s, Model: %s, Stop reason: %s, Full response: %s", + response.id, + response.model, + response.stop_reason, + json.dumps(response_data), + ) + raise LLMServerError( + message=f"LLM provider returned empty content in response (ID: {response.id}, model: {response.model}, stop_reason: {response.stop_reason})", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={ + "response_id": response.id, + "model": response.model, + "stop_reason": response.stop_reason, + }, + ) assert response.role == "assistant" choice = Choice( @@ -1372,7 +1541,7 @@ def is_heartbeat(message: dict, is_ping: bool = False) -> bool: try: message_json = json.loads(message["content"]) - except: + except Exception: return False # Check if message_json is a dict (not int, str, list, etc.) diff --git a/letta/llm_api/azure_client.py b/letta/llm_api/azure_client.py index 7ce10b56..59085100 100644 --- a/letta/llm_api/azure_client.py +++ b/letta/llm_api/azure_client.py @@ -1,18 +1,31 @@ +import json import os from typing import List, Optional, Tuple -from openai import AsyncAzureOpenAI, AzureOpenAI +from openai import AsyncAzureOpenAI, AsyncOpenAI, AsyncStream, AzureOpenAI, OpenAI from openai.types.chat.chat_completion import ChatCompletion +from openai.types.chat.chat_completion_chunk import ChatCompletionChunk +from openai.types.responses.response_stream_event import ResponseStreamEvent +from letta.helpers.json_helpers import sanitize_unicode_surrogates from letta.llm_api.openai_client import OpenAIClient +from letta.log import get_logger from letta.otel.tracing import trace_method from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ProviderCategory from letta.schemas.llm_config import LLMConfig from letta.settings import model_settings +logger = get_logger(__name__) + class AzureClient(OpenAIClient): + @staticmethod + def _is_v1_endpoint(base_url: str) -> bool: + if not base_url: + return False + return base_url.rstrip("/").endswith("/openai/v1") + def 
get_byok_overrides(self, llm_config: LLMConfig) -> Tuple[Optional[str], Optional[str], Optional[str]]: if llm_config.provider_category == ProviderCategory.byok: from letta.services.provider_manager import ProviderManager @@ -29,38 +42,99 @@ class AzureClient(OpenAIClient): return None, None, None + def _resolve_credentials(self, api_key, base_url, api_version): + """Resolve credentials, falling back to env vars. For v1 endpoints, api_version is not required.""" + if not api_key: + api_key = model_settings.azure_api_key or os.environ.get("AZURE_API_KEY") + if not base_url: + base_url = model_settings.azure_base_url or os.environ.get("AZURE_BASE_URL") + if not api_version and not self._is_v1_endpoint(base_url): + api_version = model_settings.azure_api_version or os.environ.get("AZURE_API_VERSION") + return api_key, base_url, api_version + @trace_method def request(self, request_data: dict, llm_config: LLMConfig) -> dict: """ Performs underlying synchronous request to OpenAI API and returns raw response dict. 
""" api_key, base_url, api_version = self.get_byok_overrides(llm_config) - if not api_key or not base_url or not api_version: - api_key = model_settings.azure_api_key or os.environ.get("AZURE_API_KEY") - base_url = model_settings.azure_base_url or os.environ.get("AZURE_BASE_URL") - api_version = model_settings.azure_api_version or os.environ.get("AZURE_API_VERSION") + api_key, base_url, api_version = self._resolve_credentials(api_key, base_url, api_version) - client = AzureOpenAI(api_key=api_key, azure_endpoint=base_url, api_version=api_version) - response: ChatCompletion = client.chat.completions.create(**request_data) - return response.model_dump() + if self._is_v1_endpoint(base_url): + client = OpenAI(api_key=api_key, base_url=base_url) + else: + client = AzureOpenAI(api_key=api_key, azure_endpoint=base_url, api_version=api_version) + + # Route based on payload shape: Responses uses 'input', Chat Completions uses 'messages' + if "input" in request_data and "messages" not in request_data: + resp = client.responses.create(**request_data) + return resp.model_dump() + else: + response: ChatCompletion = client.chat.completions.create(**request_data) + return response.model_dump() @trace_method async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict: """ Performs underlying asynchronous request to OpenAI API and returns raw response dict. 
""" - api_key, base_url, api_version = await self.get_byok_overrides_async(llm_config) - if not api_key or not base_url or not api_version: - api_key = model_settings.azure_api_key or os.environ.get("AZURE_API_KEY") - base_url = model_settings.azure_base_url or os.environ.get("AZURE_BASE_URL") - api_version = model_settings.azure_api_version or os.environ.get("AZURE_API_VERSION") - try: - client = AsyncAzureOpenAI(api_key=api_key, azure_endpoint=base_url, api_version=api_version) - response: ChatCompletion = await client.chat.completions.create(**request_data) - except Exception as e: - raise self.handle_llm_error(e) + request_data = sanitize_unicode_surrogates(request_data) - return response.model_dump() + api_key, base_url, api_version = await self.get_byok_overrides_async(llm_config) + api_key, base_url, api_version = self._resolve_credentials(api_key, base_url, api_version) + + try: + if self._is_v1_endpoint(base_url): + client = AsyncOpenAI(api_key=api_key, base_url=base_url) + else: + client = AsyncAzureOpenAI(api_key=api_key, azure_endpoint=base_url, api_version=api_version) + + # Route based on payload shape: Responses uses 'input', Chat Completions uses 'messages' + if "input" in request_data and "messages" not in request_data: + resp = await client.responses.create(**request_data) + return resp.model_dump() + else: + response: ChatCompletion = await client.chat.completions.create(**request_data) + return response.model_dump() + except Exception as e: + raise self.handle_llm_error(e, llm_config=llm_config) + + @trace_method + async def stream_async(self, request_data: dict, llm_config: LLMConfig) -> AsyncStream[ChatCompletionChunk | ResponseStreamEvent]: + """ + Performs underlying asynchronous streaming request to Azure/OpenAI and returns the async stream iterator. 
+ """ + request_data = sanitize_unicode_surrogates(request_data) + + api_key, base_url, api_version = await self.get_byok_overrides_async(llm_config) + api_key, base_url, api_version = self._resolve_credentials(api_key, base_url, api_version) + + if self._is_v1_endpoint(base_url): + client = AsyncOpenAI(api_key=api_key, base_url=base_url) + else: + client = AsyncAzureOpenAI(api_key=api_key, azure_endpoint=base_url, api_version=api_version) + + # Route based on payload shape: Responses uses 'input', Chat Completions uses 'messages' + if "input" in request_data and "messages" not in request_data: + try: + response_stream: AsyncStream[ResponseStreamEvent] = await client.responses.create( + **request_data, + stream=True, + ) + except Exception as e: + logger.error(f"Error streaming Azure Responses request: {e} with request data: {json.dumps(request_data)}") + raise e + else: + try: + response_stream: AsyncStream[ChatCompletionChunk] = await client.chat.completions.create( + **request_data, + stream=True, + stream_options={"include_usage": True}, + ) + except Exception as e: + logger.error(f"Error streaming Azure Chat Completions request: {e} with request data: {json.dumps(request_data)}") + raise e + return response_stream @trace_method async def request_embeddings(self, inputs: List[str], embedding_config: EmbeddingConfig) -> List[List[float]]: @@ -68,7 +142,12 @@ class AzureClient(OpenAIClient): api_key = model_settings.azure_api_key or os.environ.get("AZURE_API_KEY") base_url = model_settings.azure_base_url or os.environ.get("AZURE_BASE_URL") api_version = model_settings.azure_api_version or os.environ.get("AZURE_API_VERSION") - client = AsyncAzureOpenAI(api_key=api_key, api_version=api_version, azure_endpoint=base_url) + + if self._is_v1_endpoint(base_url): + client = AsyncOpenAI(api_key=api_key, base_url=base_url) + else: + client = AsyncAzureOpenAI(api_key=api_key, api_version=api_version, azure_endpoint=base_url) + response = await 
client.embeddings.create(model=embedding_config.embedding_model, input=inputs) # TODO: add total usage diff --git a/letta/llm_api/chatgpt_oauth_client.py b/letta/llm_api/chatgpt_oauth_client.py index 86854c06..e5c8f4c0 100644 --- a/letta/llm_api/chatgpt_oauth_client.py +++ b/letta/llm_api/chatgpt_oauth_client.py @@ -1,10 +1,10 @@ """ChatGPT OAuth Client - handles requests to chatgpt.com/backend-api/codex/responses.""" +import asyncio import json -from typing import Any, AsyncIterator, Callable, Dict, List, Optional, Union +from typing import Any, AsyncIterator, Dict, List, Optional import httpx -from openai.types.chat.chat_completion_chunk import ChatCompletionChunk from openai.types.responses import ( Response, ResponseCompletedEvent, @@ -32,6 +32,7 @@ from openai.types.responses.response_stream_event import ResponseStreamEvent from letta.errors import ( ContextWindowExceededError, ErrorCode, + LettaError, LLMAuthenticationError, LLMBadRequestError, LLMConnectionError, @@ -39,6 +40,7 @@ from letta.errors import ( LLMServerError, LLMTimeoutError, ) +from letta.helpers.json_helpers import sanitize_unicode_surrogates from letta.llm_api.llm_client_base import LLMClientBase from letta.log import get_logger from letta.otel.tracing import trace_method @@ -47,11 +49,6 @@ from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message as PydanticMessage from letta.schemas.openai.chat_completion_response import ( ChatCompletionResponse, - Choice, - FunctionCall, - Message as ChoiceMessage, - ToolCall, - UsageStatistics, ) from letta.schemas.providers.chatgpt_oauth import ChatGPTOAuthCredentials, ChatGPTOAuthProvider from letta.schemas.usage import LettaUsageStatistics @@ -100,6 +97,10 @@ class ChatGPTOAuthClient(LLMClientBase): 4. 
Transforms responses back to OpenAI ChatCompletion format """ + MAX_RETRIES = 3 + # Transient httpx errors that are safe to retry (connection drops, transport-level failures) + _RETRYABLE_ERRORS = (httpx.ReadError, httpx.WriteError, httpx.ConnectError, httpx.RemoteProtocolError, LLMConnectionError) + @trace_method async def _get_provider_and_credentials_async(self, llm_config: LLMConfig) -> tuple[ChatGPTOAuthProvider, ChatGPTOAuthCredentials]: """Get the ChatGPT OAuth provider and credentials with automatic refresh if needed. @@ -153,6 +154,11 @@ class ChatGPTOAuthClient(LLMClientBase): Returns: Dictionary of HTTP headers. """ + if not creds.access_token: + raise LLMAuthenticationError( + message="ChatGPT OAuth access_token is empty or missing", + code=ErrorCode.UNAUTHENTICATED, + ) return { "Authorization": f"Bearer {creds.access_token}", "ChatGPT-Account-Id": creds.account_id, @@ -356,38 +362,68 @@ class ChatGPTOAuthClient(LLMClientBase): Returns: Response data in OpenAI ChatCompletion format. 
""" + request_data = sanitize_unicode_surrogates(request_data) + _, creds = await self._get_provider_and_credentials_async(llm_config) headers = self._build_headers(creds) endpoint = llm_config.model_endpoint or CHATGPT_CODEX_ENDPOINT # ChatGPT backend requires streaming, so we use client.stream() to handle SSE - async with httpx.AsyncClient() as client: + # Retry on transient network errors with exponential backoff + for attempt in range(self.MAX_RETRIES): try: - async with client.stream( - "POST", - endpoint, - json=request_data, - headers=headers, - timeout=120.0, - ) as response: - response.raise_for_status() - # Accumulate SSE events into a final response - return await self._accumulate_sse_response(response) + async with httpx.AsyncClient() as client: + async with client.stream( + "POST", + endpoint, + json=request_data, + headers=headers, + timeout=120.0, + ) as response: + response.raise_for_status() + # Accumulate SSE events into a final response + return await self._accumulate_sse_response(response) except httpx.HTTPStatusError as e: - raise self._handle_http_error(e) + mapped = self._handle_http_error(e) + if isinstance(mapped, tuple(self._RETRYABLE_ERRORS)) and attempt < self.MAX_RETRIES - 1: + wait = 2**attempt + logger.warning( + f"[ChatGPT] Retryable HTTP error on request (attempt {attempt + 1}/{self.MAX_RETRIES}), " + f"retrying in {wait}s: {type(mapped).__name__}: {mapped}" + ) + await asyncio.sleep(wait) + continue + raise mapped except httpx.TimeoutException: raise LLMTimeoutError( message="ChatGPT backend request timed out", code=ErrorCode.TIMEOUT, ) + except self._RETRYABLE_ERRORS as e: + if attempt < self.MAX_RETRIES - 1: + wait = 2**attempt + logger.warning( + f"[ChatGPT] Transient error on request (attempt {attempt + 1}/{self.MAX_RETRIES}), " + f"retrying in {wait}s: {type(e).__name__}: {e}" + ) + await asyncio.sleep(wait) + continue + raise LLMConnectionError( + message=f"Failed to connect to ChatGPT backend after {self.MAX_RETRIES} 
attempts: {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__}, + ) except httpx.RequestError as e: raise LLMConnectionError( message=f"Failed to connect to ChatGPT backend: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, ) + # Should not be reached, but satisfy type checker + raise LLMConnectionError(message="ChatGPT request failed after all retries", code=ErrorCode.INTERNAL_SERVER_ERROR) + async def _accumulate_sse_response(self, response: httpx.Response) -> dict: """Accumulate SSE stream into a final response. @@ -550,64 +586,102 @@ class ChatGPTOAuthClient(LLMClientBase): Returns: Async generator yielding ResponseStreamEvent objects. """ + request_data = sanitize_unicode_surrogates(request_data) + _, creds = await self._get_provider_and_credentials_async(llm_config) headers = self._build_headers(creds) endpoint = llm_config.model_endpoint or CHATGPT_CODEX_ENDPOINT async def stream_generator(): - event_count = 0 # Track output item index for proper event construction output_index = 0 # Track sequence_number in case backend doesn't provide it # (OpenAI SDK expects incrementing sequence numbers starting at 0) sequence_counter = 0 + # Track whether we've yielded any events — once we have, we can't + # transparently retry because the caller has already consumed partial data. 
+ has_yielded = False - async with httpx.AsyncClient() as client: - async with client.stream( - "POST", - endpoint, - json=request_data, - headers=headers, - timeout=120.0, - ) as response: - # Check for error status - if response.status_code != 200: - error_body = await response.aread() - logger.error(f"ChatGPT SSE error: {response.status_code} - {error_body}") - raise self._handle_http_error_from_status(response.status_code, error_body.decode()) + for attempt in range(self.MAX_RETRIES): + try: + async with httpx.AsyncClient() as client: + async with client.stream( + "POST", + endpoint, + json=request_data, + headers=headers, + timeout=120.0, + ) as response: + # Check for error status + if response.status_code != 200: + error_body = await response.aread() + logger.error(f"ChatGPT SSE error: {response.status_code} - {error_body}") + raise self._handle_http_error_from_status(response.status_code, error_body.decode()) - async for line in response.aiter_lines(): - if not line.startswith("data: "): - continue + async for line in response.aiter_lines(): + if not line or not line.startswith("data: "): + continue - data_str = line[6:] - if data_str == "[DONE]": - break + data_str = line[6:] + if data_str == "[DONE]": + break - try: - raw_event = json.loads(data_str) - event_type = raw_event.get("type") - event_count += 1 + try: + raw_event = json.loads(data_str) + event_type = raw_event.get("type") - # Use backend-provided sequence_number if available, else use counter - # This ensures proper ordering even if backend doesn't provide it - if "sequence_number" not in raw_event: - raw_event["sequence_number"] = sequence_counter - sequence_counter = raw_event["sequence_number"] + 1 + # Check for error events from the API (context window, rate limit, etc.) 
+ if event_type == "error": + logger.error(f"ChatGPT SSE error event: {json.dumps(raw_event, default=str)[:1000]}") + raise self._handle_sse_error_event(raw_event) - # Track output index for output_item.added events - if event_type == "response.output_item.added": - output_index = raw_event.get("output_index", output_index) + # Check for response.failed or response.incomplete events + if event_type in ("response.failed", "response.incomplete"): + logger.error(f"ChatGPT SSE {event_type} event: {json.dumps(raw_event, default=str)[:1000]}") + resp_obj = raw_event.get("response", {}) + error_info = resp_obj.get("error", {}) + if error_info: + raise self._handle_sse_error_event({"error": error_info, "type": event_type}) + else: + raise LLMBadRequestError( + message=f"ChatGPT request failed with status '{event_type}' (no error details provided)", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) - # Convert to OpenAI SDK ResponseStreamEvent - sdk_event = self._convert_to_sdk_event(raw_event, output_index) - if sdk_event: - yield sdk_event + # Use backend-provided sequence_number if available, else use counter + # This ensures proper ordering even if backend doesn't provide it + if "sequence_number" not in raw_event: + raw_event["sequence_number"] = sequence_counter + sequence_counter = raw_event["sequence_number"] + 1 - except json.JSONDecodeError: - logger.warning(f"Failed to parse SSE event: {data_str[:100]}") - continue + # Track output index for output_item.added events + if event_type == "response.output_item.added": + output_index = raw_event.get("output_index", output_index) + + # Convert to OpenAI SDK ResponseStreamEvent + sdk_event = self._convert_to_sdk_event(raw_event, output_index) + if sdk_event: + yield sdk_event + has_yielded = True + + except json.JSONDecodeError: + logger.warning(f"Failed to parse SSE event: {data_str[:100]}") + continue + + # Stream completed successfully + return + + except self._RETRYABLE_ERRORS as e: + if has_yielded or attempt >= 
self.MAX_RETRIES - 1: + # Already yielded partial data or exhausted retries — must propagate + raise + wait = 2**attempt + logger.warning( + f"[ChatGPT] Transient error on stream (attempt {attempt + 1}/{self.MAX_RETRIES}), " + f"retrying in {wait}s: {type(e).__name__}: {e}" + ) + await asyncio.sleep(wait) # Wrap the async generator in AsyncStreamWrapper to provide context manager protocol return AsyncStreamWrapper(stream_generator()) @@ -944,10 +1018,16 @@ class ChatGPTOAuthClient(LLMClientBase): part=part, ) - # Unhandled event types - log for debugging - logger.debug(f"Unhandled SSE event type: {event_type}") + # Unhandled event types + logger.warning(f"Unhandled ChatGPT SSE event type: {event_type}") return None + @staticmethod + def _is_upstream_connection_error(error_body: str) -> bool: + """Check if an error body indicates an upstream connection/proxy failure.""" + lower = error_body.lower() + return "upstream connect error" in lower or "reset before headers" in lower or "connection termination" in lower + def _handle_http_error_from_status(self, status_code: int, error_body: str) -> Exception: """Create appropriate exception from HTTP status code. 
@@ -968,9 +1048,14 @@ class ChatGPTOAuthClient(LLMClientBase): message=f"ChatGPT rate limit exceeded: {error_body}", code=ErrorCode.RATE_LIMIT_EXCEEDED, ) + elif status_code == 502 or (status_code >= 500 and self._is_upstream_connection_error(error_body)): + return LLMConnectionError( + message=f"ChatGPT upstream connection error: {error_body}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) elif status_code >= 500: return LLMServerError( - message=f"ChatGPT server error: {error_body}", + message=f"ChatGPT API error: {error_body}", code=ErrorCode.INTERNAL_SERVER_ERROR, ) else: @@ -992,25 +1077,43 @@ class ChatGPTOAuthClient(LLMClientBase): return "o1" in model or "o3" in model or "o4" in model or "gpt-5" in model @trace_method - def handle_llm_error(self, e: Exception) -> Exception: + def handle_llm_error(self, e: Exception, llm_config: Optional[LLMConfig] = None) -> Exception: """Map ChatGPT-specific errors to common LLMError types. Args: e: Original exception. + llm_config: Optional LLM config to determine if this is a BYOK key. Returns: Mapped LLMError subclass. """ + is_byok = (llm_config.provider_category == ProviderCategory.byok) if llm_config else None + + # Already a typed LLM/Letta error (e.g. 
from SSE error handling) — pass through + if isinstance(e, LettaError): + return e + if isinstance(e, httpx.HTTPStatusError): - return self._handle_http_error(e) + return self._handle_http_error(e, is_byok=is_byok) - return super().handle_llm_error(e) + # Handle httpx network errors which can occur during streaming + # when the connection is unexpectedly closed while reading/writing + if isinstance(e, (httpx.ReadError, httpx.WriteError, httpx.ConnectError)): + logger.warning(f"[ChatGPT] Network error during streaming: {type(e).__name__}: {e}") + return LLMConnectionError( + message=f"Network error during ChatGPT streaming: {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__, "is_byok": is_byok}, + ) - def _handle_http_error(self, e: httpx.HTTPStatusError) -> Exception: + return super().handle_llm_error(e, llm_config=llm_config) + + def _handle_http_error(self, e: httpx.HTTPStatusError, is_byok: bool | None = None) -> Exception: """Handle HTTP status errors from ChatGPT backend. Args: e: HTTP status error. + is_byok: Whether the request used a BYOK key. Returns: Appropriate LLMError subclass. 
@@ -1028,28 +1131,86 @@ class ChatGPTOAuthClient(LLMClientBase): return LLMAuthenticationError( message=f"ChatGPT authentication failed: {error_message}", code=ErrorCode.UNAUTHENTICATED, + details={"is_byok": is_byok}, ) elif status_code == 429: return LLMRateLimitError( message=f"ChatGPT rate limit exceeded: {error_message}", code=ErrorCode.RATE_LIMIT_EXCEEDED, + details={"is_byok": is_byok}, ) elif status_code == 400: if "context" in error_message.lower() or "token" in error_message.lower(): return ContextWindowExceededError( message=f"ChatGPT context window exceeded: {error_message}", + details={"is_byok": is_byok}, ) return LLMBadRequestError( message=f"ChatGPT bad request: {error_message}", code=ErrorCode.INVALID_ARGUMENT, + details={"is_byok": is_byok}, + ) + elif status_code == 502 or (status_code >= 500 and self._is_upstream_connection_error(error_message)): + return LLMConnectionError( + message=f"ChatGPT upstream connection error: {error_message}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) elif status_code >= 500: return LLMServerError( - message=f"ChatGPT server error: {error_message}", + message=f"ChatGPT API error: {error_message}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) else: return LLMBadRequestError( message=f"ChatGPT request failed ({status_code}): {error_message}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, + ) + + def _handle_sse_error_event(self, raw_event: dict) -> Exception: + """Create appropriate exception from an SSE error or response.failed event. + + The ChatGPT backend can return errors as SSE events within a 200 OK stream, + e.g. {"type": "error", "error": {"type": "invalid_request_error", + "code": "context_length_exceeded", "message": "..."}}. + + Args: + raw_event: Raw SSE event data containing an error. + + Returns: + Appropriate LLM exception. 
+ """ + error_obj = raw_event.get("error", {}) + if isinstance(error_obj, str): + error_message = error_obj + error_code = None + else: + error_message = error_obj.get("message", "Unknown ChatGPT SSE error") + error_code = error_obj.get("code") or None + + if error_code == "context_length_exceeded": + return ContextWindowExceededError( + message=f"ChatGPT context window exceeded: {error_message}", + ) + elif error_code == "rate_limit_exceeded": + return LLMRateLimitError( + message=f"ChatGPT rate limit exceeded: {error_message}", + code=ErrorCode.RATE_LIMIT_EXCEEDED, + ) + elif error_code == "authentication_error": + return LLMAuthenticationError( + message=f"ChatGPT authentication failed: {error_message}", + code=ErrorCode.UNAUTHENTICATED, + ) + elif error_code == "server_error": + return LLMServerError( + message=f"ChatGPT API error: {error_message}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) + else: + return LLMBadRequestError( + message=f"ChatGPT SSE error ({error_code or 'unknown'}): {error_message}", + code=ErrorCode.INVALID_ARGUMENT, ) diff --git a/letta/llm_api/deepseek_client.py b/letta/llm_api/deepseek_client.py index 0703445d..5b2b6ef0 100644 --- a/letta/llm_api/deepseek_client.py +++ b/letta/llm_api/deepseek_client.py @@ -5,6 +5,7 @@ from openai import AsyncOpenAI, AsyncStream, OpenAI from openai.types.chat.chat_completion import ChatCompletion from openai.types.chat.chat_completion_chunk import ChatCompletionChunk +from letta.helpers.json_helpers import sanitize_unicode_surrogates from letta.llm_api.openai_client import OpenAIClient from letta.log import get_logger from letta.otel.tracing import trace_method @@ -97,6 +98,8 @@ class DeepseekClient(OpenAIClient): """ Performs underlying asynchronous request to OpenAI API and returns raw response dict. 
""" + request_data = sanitize_unicode_surrogates(request_data) + api_key = model_settings.deepseek_api_key or os.environ.get("DEEPSEEK_API_KEY") client = AsyncOpenAI(api_key=api_key, base_url=llm_config.model_endpoint) @@ -108,6 +111,8 @@ class DeepseekClient(OpenAIClient): """ Performs underlying asynchronous streaming request to OpenAI and returns the async stream iterator. """ + request_data = sanitize_unicode_surrogates(request_data) + api_key = model_settings.deepseek_api_key or os.environ.get("DEEPSEEK_API_KEY") client = AsyncOpenAI(api_key=api_key, base_url=llm_config.model_endpoint) response_stream: AsyncStream[ChatCompletionChunk] = await client.chat.completions.create( diff --git a/letta/llm_api/error_utils.py b/letta/llm_api/error_utils.py index b1d6e356..ad28d036 100644 --- a/letta/llm_api/error_utils.py +++ b/letta/llm_api/error_utils.py @@ -20,3 +20,21 @@ def is_context_window_overflow_message(msg: str) -> bool: or "context_length_exceeded" in msg or "Input tokens exceed the configured limit" in msg ) + + +def is_insufficient_credits_message(msg: str) -> bool: + """Best-effort detection for insufficient credits/quota/billing errors. + + BYOK users on OpenRouter, OpenAI, etc. may exhaust their credits mid-stream + or get rejected pre-flight. We detect these so they map to 402 instead of 400/500. 
+ """ + lower = msg.lower() + return ( + "insufficient credits" in lower + or "requires more credits" in lower + or "add more credits" in lower + or "exceeded your current quota" in lower + or "you've exceeded your budget" in lower + or ("billing" in lower and "hard limit" in lower) + or "can only afford" in lower + ) diff --git a/letta/llm_api/google_ai_client.py b/letta/llm_api/google_ai_client.py index b1281b62..f765a121 100644 --- a/letta/llm_api/google_ai_client.py +++ b/letta/llm_api/google_ai_client.py @@ -8,6 +8,7 @@ from letta.errors import ErrorCode, LLMAuthenticationError, LLMError from letta.llm_api.google_constants import GOOGLE_MODEL_FOR_API_KEY_CHECK from letta.llm_api.google_vertex_client import GoogleVertexClient from letta.log import get_logger +from letta.schemas.llm_config import LLMConfig from letta.settings import model_settings, settings logger = get_logger(__name__) @@ -16,10 +17,27 @@ logger = get_logger(__name__) class GoogleAIClient(GoogleVertexClient): provider_label = "Google AI" - def _get_client(self): + def _get_client(self, llm_config: Optional[LLMConfig] = None): timeout_ms = int(settings.llm_request_timeout_seconds * 1000) + api_key = None + if llm_config: + api_key, _, _ = self.get_byok_overrides(llm_config) + if not api_key: + api_key = model_settings.gemini_api_key return genai.Client( - api_key=model_settings.gemini_api_key, + api_key=api_key, + http_options=HttpOptions(timeout=timeout_ms), + ) + + async def _get_client_async(self, llm_config: Optional[LLMConfig] = None): + timeout_ms = int(settings.llm_request_timeout_seconds * 1000) + api_key = None + if llm_config: + api_key, _, _ = await self.get_byok_overrides_async(llm_config) + if not api_key: + api_key = model_settings.gemini_api_key + return genai.Client( + api_key=api_key, http_options=HttpOptions(timeout=timeout_ms), ) diff --git a/letta/llm_api/google_constants.py b/letta/llm_api/google_constants.py index a7e9151a..ddc58634 100644 --- 
a/letta/llm_api/google_constants.py +++ b/letta/llm_api/google_constants.py @@ -1,5 +1,6 @@ GOOGLE_MODEL_TO_CONTEXT_LENGTH = { "gemini-3-pro-preview": 1048576, + "gemini-3.1-pro-preview": 1048576, "gemini-3-flash-preview": 1048576, "gemini-2.5-pro": 1048576, "gemini-2.5-flash": 1048576, diff --git a/letta/llm_api/google_vertex_client.py b/letta/llm_api/google_vertex_client.py index b5bac794..13dc5936 100644 --- a/letta/llm_api/google_vertex_client.py +++ b/letta/llm_api/google_vertex_client.py @@ -1,9 +1,11 @@ import base64 +import copy import json import uuid from typing import AsyncIterator, List, Optional import httpx +import pydantic_core from google.genai import Client, errors from google.genai.types import ( FunctionCallingConfig, @@ -21,6 +23,7 @@ from letta.errors import ( LLMAuthenticationError, LLMBadRequestError, LLMConnectionError, + LLMInsufficientCreditsError, LLMNotFoundError, LLMPermissionDeniedError, LLMRateLimitError, @@ -29,12 +32,14 @@ from letta.errors import ( LLMUnprocessableEntityError, ) from letta.helpers.datetime_helpers import get_utc_time_int -from letta.helpers.json_helpers import json_dumps, json_loads +from letta.helpers.json_helpers import json_dumps, json_loads, sanitize_unicode_surrogates +from letta.llm_api.error_utils import is_insufficient_credits_message from letta.llm_api.llm_client_base import LLMClientBase from letta.local_llm.json_parser import clean_json_string_extra_backslash from letta.log import get_logger from letta.otel.tracing import trace_method from letta.schemas.agent import AgentType +from letta.schemas.enums import ProviderCategory from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message as PydanticMessage from letta.schemas.openai.chat_completion_request import Tool, Tool as OpenAITool @@ -50,8 +55,31 @@ class GoogleVertexClient(LLMClientBase): MAX_RETRIES = model_settings.gemini_max_retries provider_label = "Google Vertex" - def _get_client(self): + def _get_client(self, 
llm_config: Optional[LLMConfig] = None): timeout_ms = int(settings.llm_request_timeout_seconds * 1000) + if llm_config: + api_key, _, _ = self.get_byok_overrides(llm_config) + if api_key: + return Client( + api_key=api_key, + http_options=HttpOptions(timeout=timeout_ms), + ) + return Client( + vertexai=True, + project=model_settings.google_cloud_project, + location=model_settings.google_cloud_location, + http_options=HttpOptions(api_version="v1", timeout=timeout_ms), + ) + + async def _get_client_async(self, llm_config: Optional[LLMConfig] = None): + timeout_ms = int(settings.llm_request_timeout_seconds * 1000) + if llm_config: + api_key, _, _ = await self.get_byok_overrides_async(llm_config) + if api_key: + return Client( + api_key=api_key, + http_options=HttpOptions(timeout=timeout_ms), + ) return Client( vertexai=True, project=model_settings.google_cloud_project, @@ -71,22 +99,36 @@ class GoogleVertexClient(LLMClientBase): Performs underlying request to llm and returns raw response. """ try: - client = self._get_client() + client = self._get_client(llm_config) response = client.models.generate_content( model=llm_config.model, contents=request_data["contents"], config=request_data["config"], ) return response.model_dump() + except pydantic_core._pydantic_core.ValidationError as e: + # Handle Pydantic validation errors from the Google SDK + # This occurs when tool schemas contain unsupported fields + logger.error( + f"Pydantic validation error when calling {self._provider_name()} API. Tool schema contains unsupported fields. Error: {e}" + ) + raise LLMBadRequestError( + message=f"Invalid tool schema for {self._provider_name()}: Tool parameters contain unsupported fields. " + f"Common issues: 'const', 'default', 'additionalProperties' are not supported by Google AI. " + f"Please check your tool definitions. 
Error: {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) except Exception as e: - raise self.handle_llm_error(e) + raise self.handle_llm_error(e, llm_config=llm_config) @trace_method async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict: """ Performs underlying request to llm and returns raw response. """ - client = self._get_client() + request_data = sanitize_unicode_surrogates(request_data) + + client = await self._get_client_async(llm_config) # Gemini 2.5 models will often return MALFORMED_FUNCTION_CALL, force a retry # https://github.com/googleapis/python-aiplatform/issues/4472 @@ -100,17 +142,30 @@ class GoogleVertexClient(LLMClientBase): contents=request_data["contents"], config=request_data["config"], ) + except pydantic_core._pydantic_core.ValidationError as e: + # Handle Pydantic validation errors from the Google SDK + # This occurs when tool schemas contain unsupported fields + logger.error( + f"Pydantic validation error when calling {self._provider_name()} API. " + f"Tool schema contains unsupported fields. Error: {e}" + ) + raise LLMBadRequestError( + message=f"Invalid tool schema for {self._provider_name()}: Tool parameters contain unsupported fields. " + f"Common issues: 'const', 'default', 'additionalProperties' are not supported by Google AI. " + f"Please check your tool definitions. 
Error: {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) except errors.APIError as e: # Retry on 503 and 500 errors as well, usually ephemeral from Gemini if e.code == 503 or e.code == 500 or e.code == 504: logger.warning(f"Received {e}, retrying {retry_count}/{self.MAX_RETRIES}") retry_count += 1 if retry_count > self.MAX_RETRIES: - raise self.handle_llm_error(e) + raise self.handle_llm_error(e, llm_config=llm_config) continue - raise self.handle_llm_error(e) + raise self.handle_llm_error(e, llm_config=llm_config) except Exception as e: - raise self.handle_llm_error(e) + raise self.handle_llm_error(e, llm_config=llm_config) response_data = response.model_dump() is_malformed_function_call = self.is_malformed_function_call(response_data) if is_malformed_function_call: @@ -148,7 +203,9 @@ class GoogleVertexClient(LLMClientBase): @trace_method async def stream_async(self, request_data: dict, llm_config: LLMConfig) -> AsyncIterator[GenerateContentResponse]: - client = self._get_client() + request_data = sanitize_unicode_surrogates(request_data) + + client = await self._get_client_async(llm_config) try: response = await client.aio.models.generate_content_stream( @@ -156,13 +213,35 @@ class GoogleVertexClient(LLMClientBase): contents=request_data["contents"], config=request_data["config"], ) + except pydantic_core._pydantic_core.ValidationError as e: + # Handle Pydantic validation errors from the Google SDK + # This occurs when tool schemas contain unsupported fields + logger.error( + f"Pydantic validation error when calling {self._provider_name()} API. Tool schema contains unsupported fields. Error: {e}" + ) + raise LLMBadRequestError( + message=f"Invalid tool schema for {self._provider_name()}: Tool parameters contain unsupported fields. " + f"Common issues: 'const', 'default', 'additionalProperties' are not supported by Google AI. " + f"Please check your tool definitions. 
Error: {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) + except errors.APIError as e: + raise self.handle_llm_error(e) except Exception as e: logger.error(f"Error streaming {self._provider_name()} request: {e} with request data: {json.dumps(request_data)}") raise e # Direct yield - keeps response alive in generator's local scope throughout iteration # This is required because the SDK's connection lifecycle is tied to the response object - async for chunk in response: - yield chunk + try: + async for chunk in response: + yield chunk + except errors.ClientError as e: + if e.code == 499: + logger.info(f"{self._provider_prefix()} Stream cancelled by client (499): {e}") + return + raise self.handle_llm_error(e, llm_config=llm_config) + except errors.APIError as e: + raise self.handle_llm_error(e, llm_config=llm_config) @staticmethod def add_dummy_model_messages(messages: List[dict]) -> List[dict]: @@ -196,7 +275,7 @@ class GoogleVertexClient(LLMClientBase): # Per https://ai.google.dev/gemini-api/docs/function-calling?example=meeting#notes_and_limitations # * Only a subset of the OpenAPI schema is supported. # * Supported parameter types in Python are limited. - unsupported_keys = ["default", "exclusiveMaximum", "exclusiveMinimum", "additionalProperties", "$schema"] + unsupported_keys = ["default", "exclusiveMaximum", "exclusiveMinimum", "additionalProperties", "$schema", "const", "$ref"] keys_to_remove_at_this_level = [key for key in unsupported_keys if key in schema_part] for key_to_remove in keys_to_remove_at_this_level: logger.debug(f"Removing unsupported keyword '{key_to_remove}' from schema part.") @@ -223,6 +302,49 @@ class GoogleVertexClient(LLMClientBase): for item_schema in schema_part[key]: self._clean_google_ai_schema_properties(item_schema) + def _resolve_json_schema_refs(self, schema: dict, defs: dict | None = None) -> dict: + """ + Recursively resolve $ref in JSON schema by inlining definitions. + Google GenAI SDK does not support $ref. 
+ """ + if defs is None: + # Look for definitions at the top level + defs = schema.get("$defs") or schema.get("definitions") or {} + + if not isinstance(schema, dict): + return schema + + # If this is a ref, resolve it + if "$ref" in schema: + ref = schema["$ref"] + if isinstance(ref, str): + for prefix in ("#/$defs/", "#/definitions/"): + if ref.startswith(prefix): + ref_name = ref.split("/")[-1] + if ref_name in defs: + resolved = defs[ref_name].copy() + return self._resolve_json_schema_refs(resolved, defs) + break + + logger.warning(f"Could not resolve $ref '{ref}' in schema — will be stripped by schema cleaner") + + # Recursively process children + new_schema = schema.copy() + + # We need to remove $defs/definitions from the output schema as Google doesn't support them + if "$defs" in new_schema: + del new_schema["$defs"] + if "definitions" in new_schema: + del new_schema["definitions"] + + for k, v in new_schema.items(): + if isinstance(v, dict): + new_schema[k] = self._resolve_json_schema_refs(v, defs) + elif isinstance(v, list): + new_schema[k] = [self._resolve_json_schema_refs(i, defs) if isinstance(i, dict) else i for i in v] + + return new_schema + def convert_tools_to_google_ai_format(self, tools: List[Tool], llm_config: LLMConfig) -> List[dict]: """ OpenAI style: @@ -273,7 +395,8 @@ class GoogleVertexClient(LLMClientBase): dict( name=t.function.name, description=t.function.description, - parameters=t.function.parameters, # TODO need to unpack + # Deep copy parameters to avoid modifying the original Tool object + parameters=copy.deepcopy(t.function.parameters) if t.function.parameters else {}, ) for t in tools ] @@ -284,6 +407,8 @@ class GoogleVertexClient(LLMClientBase): # Google AI API only supports a subset of OpenAPI 3.0, so unsupported params must be cleaned if "parameters" in func and isinstance(func["parameters"], dict): + # Resolve $ref in schema because Google AI SDK doesn't support them + func["parameters"] = 
self._resolve_json_schema_refs(func["parameters"]) self._clean_google_ai_schema_properties(func["parameters"]) # Add inner thoughts @@ -549,6 +674,9 @@ class GoogleVertexClient(LLMClientBase): content=inner_thoughts, tool_calls=[tool_call], ) + if response_message.thought_signature: + thought_signature = base64.b64encode(response_message.thought_signature).decode("utf-8") + openai_response_message.reasoning_content_signature = thought_signature else: openai_response_message.content = inner_thoughts if openai_response_message.tool_calls is None: @@ -670,6 +798,7 @@ class GoogleVertexClient(LLMClientBase): # "candidatesTokenCount": 27, # "totalTokenCount": 36 # } + usage = None if response.usage_metadata: # Extract usage via centralized method from letta.schemas.enums import ProviderType @@ -750,54 +879,80 @@ class GoogleVertexClient(LLMClientBase): return False @trace_method - def handle_llm_error(self, e: Exception) -> Exception: + def handle_llm_error(self, e: Exception, llm_config: Optional[LLMConfig] = None) -> Exception: + is_byok = (llm_config.provider_category == ProviderCategory.byok) if llm_config else None + # Handle Google GenAI specific errors if isinstance(e, errors.ClientError): + if e.code == 499: + logger.info(f"{self._provider_prefix()} Request cancelled by client (499): {e}") + return LLMConnectionError( + message=f"Request to {self._provider_name()} was cancelled (client disconnected): {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"status_code": 499, "cause": "client_cancelled", "is_byok": is_byok}, + ) + logger.warning(f"{self._provider_prefix()} Client error ({e.code}): {e}") # Handle specific error codes if e.code == 400: error_str = str(e).lower() - if "context" in error_str and ("exceed" in error_str or "limit" in error_str or "too long" in error_str): + if ("context" in error_str or "token count" in error_str or "tokens allowed" in error_str) and ( + "exceed" in error_str or "limit" in error_str or "too long" in error_str + 
): return ContextWindowExceededError( message=f"Bad request to {self._provider_name()} (context window exceeded): {str(e)}", + details={"is_byok": is_byok}, ) else: return LLMBadRequestError( message=f"Bad request to {self._provider_name()}: {str(e)}", - code=ErrorCode.INTERNAL_SERVER_ERROR, + code=ErrorCode.INVALID_ARGUMENT, + details={"is_byok": is_byok}, ) elif e.code == 401: return LLMAuthenticationError( message=f"Authentication failed with {self._provider_name()}: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) elif e.code == 403: return LLMPermissionDeniedError( message=f"Permission denied by {self._provider_name()}: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) elif e.code == 404: return LLMNotFoundError( message=f"Resource not found in {self._provider_name()}: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) elif e.code == 408: return LLMTimeoutError( message=f"Request to {self._provider_name()} timed out: {str(e)}", code=ErrorCode.TIMEOUT, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, + ) + elif e.code == 402 or is_insufficient_credits_message(str(e)): + msg = str(e) + return LLMInsufficientCreditsError( + message=f"Insufficient credits (BYOK): {msg}" if is_byok else f"Insufficient credits: {msg}", + code=ErrorCode.PAYMENT_REQUIRED, + details={"status_code": e.code, "is_byok": is_byok}, ) elif e.code == 422: return LLMUnprocessableEntityError( message=f"Invalid request content for {self._provider_name()}: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"is_byok": is_byok}, ) elif e.code == 429: logger.warning(f"{self._provider_prefix()} Rate limited (429). 
Consider backoff.") return LLMRateLimitError( message=f"Rate limited by {self._provider_name()}: {str(e)}", code=ErrorCode.RATE_LIMIT_EXCEEDED, + details={"is_byok": is_byok}, ) else: return LLMServerError( @@ -806,6 +961,7 @@ class GoogleVertexClient(LLMClientBase): details={ "status_code": e.code, "response_json": getattr(e, "response_json", None), + "is_byok": is_byok, }, ) @@ -820,13 +976,14 @@ class GoogleVertexClient(LLMClientBase): details={ "status_code": e.code, "response_json": getattr(e, "response_json", None), + "is_byok": is_byok, }, ) elif e.code == 502: return LLMConnectionError( message=f"Bad gateway from {self._provider_name()}: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) elif e.code == 503: return LLMServerError( @@ -835,13 +992,14 @@ class GoogleVertexClient(LLMClientBase): details={ "status_code": e.code, "response_json": getattr(e, "response_json", None), + "is_byok": is_byok, }, ) elif e.code == 504: return LLMTimeoutError( message=f"Gateway timeout from {self._provider_name()}: {str(e)}", code=ErrorCode.TIMEOUT, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) else: return LLMServerError( @@ -850,6 +1008,7 @@ class GoogleVertexClient(LLMClientBase): details={ "status_code": e.code, "response_json": getattr(e, "response_json", None), + "is_byok": is_byok, }, ) @@ -861,6 +1020,7 @@ class GoogleVertexClient(LLMClientBase): details={ "status_code": e.code, "response_json": getattr(e, "response_json", None), + "is_byok": is_byok, }, ) @@ -872,7 +1032,7 @@ class GoogleVertexClient(LLMClientBase): return LLMConnectionError( message=f"Connection error during {self._provider_name()} streaming: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else 
None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) # Handle httpx network errors which can occur during streaming @@ -882,7 +1042,7 @@ class GoogleVertexClient(LLMClientBase): return LLMConnectionError( message=f"Network error during {self._provider_name()} streaming: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__, "is_byok": is_byok}, ) # Handle connection-related errors @@ -891,13 +1051,15 @@ class GoogleVertexClient(LLMClientBase): return LLMConnectionError( message=f"Failed to connect to {self._provider_name()}: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) # Fallback to base implementation for other errors - return super().handle_llm_error(e) + return super().handle_llm_error(e, llm_config=llm_config) - async def count_tokens(self, messages: List[dict] = None, model: str = None, tools: List[OpenAITool] = None) -> int: + async def count_tokens( + self, messages: List[dict] | None = None, model: str | None = None, tools: List[OpenAITool] | None = None + ) -> int: """ Count tokens for the given messages and tools using the Gemini token counting API. 
diff --git a/letta/llm_api/groq_client.py b/letta/llm_api/groq_client.py index 5909f4d5..5f24669d 100644 --- a/letta/llm_api/groq_client.py +++ b/letta/llm_api/groq_client.py @@ -5,6 +5,7 @@ from openai import AsyncOpenAI, AsyncStream, OpenAI from openai.types.chat.chat_completion import ChatCompletion from openai.types.chat.chat_completion_chunk import ChatCompletionChunk +from letta.helpers.json_helpers import sanitize_unicode_surrogates from letta.llm_api.openai_client import OpenAIClient from letta.otel.tracing import trace_method from letta.schemas.embedding_config import EmbeddingConfig @@ -34,6 +35,11 @@ class GroqClient(OpenAIClient): ) -> dict: data = super().build_request_data(agent_type, messages, llm_config, tools, force_tool_call, requires_subsequent_tool_call) + # Groq only supports string values for tool_choice: "none", "auto", "required" + # Convert object-format tool_choice (used for force_tool_call) to "required" + if "tool_choice" in data and isinstance(data["tool_choice"], dict): + data["tool_choice"] = "required" + # Groq validation - these fields are not supported and will cause 400 errors # https://console.groq.com/docs/openai if "top_logprobs" in data: @@ -69,6 +75,8 @@ class GroqClient(OpenAIClient): """ Performs underlying asynchronous request to Groq API and returns raw response dict. 
""" + request_data = sanitize_unicode_surrogates(request_data) + api_key = model_settings.groq_api_key or os.environ.get("GROQ_API_KEY") client = AsyncOpenAI(api_key=api_key, base_url=llm_config.model_endpoint) diff --git a/letta/llm_api/helpers.py b/letta/llm_api/helpers.py index aee2b73b..85695a48 100644 --- a/letta/llm_api/helpers.py +++ b/letta/llm_api/helpers.py @@ -1,23 +1,17 @@ import copy import json -import logging from collections import OrderedDict -from typing import Any, List, Optional, Union +from typing import List, Optional -from letta.constants import OPENAI_CONTEXT_WINDOW_ERROR_SUBSTRING from letta.helpers.json_helpers import json_dumps from letta.log import get_logger -from letta.schemas.message import Message from letta.schemas.openai.chat_completion_response import ChatCompletionResponse, Choice from letta.schemas.response_format import ( JsonObjectResponseFormat, JsonSchemaResponseFormat, - ResponseFormatType, ResponseFormatUnion, TextResponseFormat, ) -from letta.settings import summarizer_settings -from letta.utils import printd logger = get_logger(__name__) diff --git a/letta/llm_api/llm_api_tools.py b/letta/llm_api/llm_api_tools.py index 280b3eb0..c4f178e2 100644 --- a/letta/llm_api/llm_api_tools.py +++ b/letta/llm_api/llm_api_tools.py @@ -23,7 +23,7 @@ from letta.local_llm.constants import INNER_THOUGHTS_KWARG from letta.local_llm.utils import num_tokens_from_functions, num_tokens_from_messages from letta.orm.user import User from letta.otel.tracing import log_event, trace_method -from letta.schemas.enums import ProviderCategory +from letta.schemas.enums import LLMCallType, ProviderCategory from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message from letta.schemas.openai.chat_completion_response import ChatCompletionResponse @@ -245,6 +245,7 @@ def create( request_json=prepare_openai_payload(data), response_json=response.model_json_schema(), step_id=step_id, + call_type=LLMCallType.agent_step, ), ) diff 
--git a/letta/llm_api/llm_client_base.py b/letta/llm_api/llm_client_base.py index 754a19e8..0cdbe894 100644 --- a/letta/llm_api/llm_client_base.py +++ b/letta/llm_api/llm_client_base.py @@ -10,7 +10,7 @@ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk from letta.errors import ErrorCode, LLMConnectionError, LLMError from letta.otel.tracing import log_event, trace_method from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import AgentType, ProviderCategory +from letta.schemas.enums import AgentType, LLMCallType, ProviderCategory from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message from letta.schemas.openai.chat_completion_response import ChatCompletionResponse @@ -61,8 +61,11 @@ class LLMClientBase: user_id: Optional[str] = None, compaction_settings: Optional[Dict] = None, llm_config: Optional[Dict] = None, + actor: Optional["User"] = None, ) -> None: """Set telemetry context for provider trace logging.""" + if actor is not None: + self.actor = actor self._telemetry_manager = telemetry_manager self._telemetry_agent_id = agent_id self._telemetry_agent_tags = agent_tags @@ -82,6 +85,10 @@ class LLMClientBase: """Wrapper around request_async that logs telemetry for all requests including errors. Call set_telemetry_context() first to set agent_id, run_id, etc. + + Telemetry is logged via TelemetryManager which supports multiple backends + (postgres, clickhouse, socket, etc.) configured via + LETTA_TELEMETRY_PROVIDER_TRACE_BACKEND. 
""" from letta.log import get_logger @@ -97,6 +104,7 @@ class LLMClientBase: error_type = type(e).__name__ raise finally: + # Log telemetry via configured backends if self._telemetry_manager and settings.track_provider_trace: if self.actor is None: logger.warning(f"Skipping telemetry: actor is None (call_type={self._telemetry_call_type})") @@ -116,24 +124,33 @@ class LLMClientBase: org_id=self._telemetry_org_id, user_id=self._telemetry_user_id, compaction_settings=self._telemetry_compaction_settings, - llm_config=self._telemetry_llm_config, + llm_config=llm_config.model_dump() if llm_config else self._telemetry_llm_config, ), ) except Exception as e: logger.warning(f"Failed to log telemetry: {e}") - async def stream_async_with_telemetry(self, request_data: dict, llm_config: LLMConfig): - """Returns raw stream. Caller should log telemetry after processing via log_provider_trace_async(). - - Call set_telemetry_context() first to set agent_id, run_id, etc. - After consuming the stream, call log_provider_trace_async() with the response data. - """ - return await self.stream_async(request_data, llm_config) - - async def log_provider_trace_async(self, request_data: dict, response_json: dict) -> None: + async def log_provider_trace_async( + self, + request_data: dict, + response_json: Optional[dict], + llm_config: Optional[LLMConfig] = None, + latency_ms: Optional[int] = None, + error_msg: Optional[str] = None, + error_type: Optional[str] = None, + ) -> None: """Log provider trace telemetry. Call after processing LLM response. Uses telemetry context set via set_telemetry_context(). + Telemetry is logged via TelemetryManager which supports multiple backends. 
+ + Args: + request_data: The request payload sent to the LLM + response_json: The response payload from the LLM + llm_config: LLMConfig for extracting provider/model info + latency_ms: Latency in milliseconds (not used currently, kept for API compatibility) + error_msg: Error message if request failed (not used currently) + error_type: Error type if request failed (not used currently) """ from letta.log import get_logger @@ -146,6 +163,13 @@ class LLMClientBase: logger.warning(f"Skipping telemetry: actor is None (call_type={self._telemetry_call_type})") return + if response_json is None: + if error_msg: + response_json = {"error": error_msg, "error_type": error_type} + else: + logger.warning(f"Skipping telemetry: no response_json or error_msg (call_type={self._telemetry_call_type})") + return + try: pydantic_actor = self.actor.to_pydantic() if hasattr(self.actor, "to_pydantic") else self.actor await self._telemetry_manager.create_provider_trace_async( @@ -161,7 +185,7 @@ class LLMClientBase: org_id=self._telemetry_org_id, user_id=self._telemetry_user_id, compaction_settings=self._telemetry_compaction_settings, - llm_config=self._telemetry_llm_config, + llm_config=llm_config.model_dump() if llm_config else self._telemetry_llm_config, ), ) except Exception as e: @@ -204,11 +228,12 @@ class LLMClientBase: request_json=request_data, response_json=response_data, step_id=step_id, + call_type=LLMCallType.agent_step, ), ) log_event(name="llm_response_received", attributes=response_data) except Exception as e: - raise self.handle_llm_error(e) + raise self.handle_llm_error(e, llm_config=llm_config) return await self.convert_response_to_chat_completion(response_data, messages, llm_config) @@ -237,12 +262,13 @@ class LLMClientBase: request_json=request_data, response_json=response_data, step_id=step_id, + call_type=LLMCallType.agent_step, ), ) log_event(name="llm_response_received", attributes=response_data) except Exception as e: - raise self.handle_llm_error(e) + raise 
self.handle_llm_error(e, llm_config=llm_config) return await self.convert_response_to_chat_completion(response_data, messages, llm_config) @@ -334,17 +360,20 @@ class LLMClientBase: raise NotImplementedError @abstractmethod - def handle_llm_error(self, e: Exception) -> Exception: + def handle_llm_error(self, e: Exception, llm_config: Optional["LLMConfig"] = None) -> Exception: """ Maps provider-specific errors to common LLMError types. Each LLM provider should implement this to translate their specific errors. Args: e: The original provider-specific exception + llm_config: Optional LLM config to determine if this is a BYOK key Returns: An LLMError subclass that represents the error in a provider-agnostic way """ + is_byok = (llm_config.provider_category == ProviderCategory.byok) if llm_config else None + # Handle httpx.RemoteProtocolError which can occur during streaming # when the remote server closes the connection unexpectedly # (e.g., "peer closed connection without sending complete message body") @@ -356,10 +385,10 @@ class LLMClientBase: return LLMConnectionError( message=f"Connection error during streaming: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) - return LLMError(f"Unhandled LLM error: {str(e)}") + return LLMError(message=f"Unhandled LLM error: {str(e)}", details={"is_byok": is_byok}) def get_byok_overrides(self, llm_config: LLMConfig) -> Tuple[Optional[str], Optional[str], Optional[str]]: """ diff --git a/letta/llm_api/minimax_client.py b/letta/llm_api/minimax_client.py index 6029f460..79769942 100644 --- a/letta/llm_api/minimax_client.py +++ b/letta/llm_api/minimax_client.py @@ -4,6 +4,7 @@ import anthropic from anthropic import AsyncStream from anthropic.types.beta import BetaMessage, BetaRawMessageStreamEvent +from letta.helpers.json_helpers import sanitize_unicode_surrogates from 
letta.llm_api.anthropic_client import AnthropicClient from letta.log import get_logger from letta.otel.tracing import trace_method @@ -83,6 +84,8 @@ class MiniMaxClient(AnthropicClient): Uses beta messages API for compatibility with Anthropic streaming interfaces. """ + request_data = sanitize_unicode_surrogates(request_data) + client = await self._get_anthropic_client_async(llm_config, async_client=True) try: @@ -105,6 +108,8 @@ class MiniMaxClient(AnthropicClient): Uses beta messages API for compatibility with Anthropic streaming interfaces. """ + request_data = sanitize_unicode_surrogates(request_data) + client = await self._get_anthropic_client_async(llm_config, async_client=True) request_data["stream"] = True diff --git a/letta/llm_api/mistral.py b/letta/llm_api/mistral.py index 8d5b8b10..ec75e0a3 100644 --- a/letta/llm_api/mistral.py +++ b/letta/llm_api/mistral.py @@ -10,7 +10,7 @@ async def mistral_get_model_list_async(url: str, api_key: str) -> dict: url = smart_urljoin(url, "models") headers = {"Content-Type": "application/json"} - if api_key is not None: + if api_key: headers["Authorization"] = f"Bearer {api_key}" logger.debug("Sending request to %s", url) diff --git a/letta/llm_api/openai.py b/letta/llm_api/openai.py index 8dc2318d..deda50e0 100644 --- a/letta/llm_api/openai.py +++ b/letta/llm_api/openai.py @@ -59,7 +59,7 @@ async def openai_get_model_list_async( url = smart_urljoin(url, "models") headers = {"Content-Type": "application/json"} - if api_key is not None: + if api_key: headers["Authorization"] = f"Bearer {api_key}" if "openrouter.ai" in url: if model_settings.openrouter_referer: @@ -86,7 +86,7 @@ async def openai_get_model_list_async( # Handle HTTP errors (e.g., response 4XX, 5XX) try: error_response = http_err.response.json() - except: + except Exception: error_response = {"status_code": http_err.response.status_code, "text": http_err.response.text} logger.debug(f"Got HTTPError, exception={http_err}, response={error_response}") raise 
http_err @@ -478,7 +478,7 @@ def openai_chat_completions_request_stream( data = prepare_openai_payload(chat_completion_request) data["stream"] = True - kwargs = {"api_key": api_key, "base_url": url, "max_retries": 0} + kwargs = {"api_key": api_key or "DUMMY_API_KEY", "base_url": url, "max_retries": 0} if "openrouter.ai" in url: headers = {} if model_settings.openrouter_referer: @@ -511,7 +511,7 @@ def openai_chat_completions_request( https://platform.openai.com/docs/guides/text-generation?lang=curl """ data = prepare_openai_payload(chat_completion_request) - kwargs = {"api_key": api_key, "base_url": url, "max_retries": 0} + kwargs = {"api_key": api_key or "DUMMY_API_KEY", "base_url": url, "max_retries": 0} if "openrouter.ai" in url: headers = {} if model_settings.openrouter_referer: @@ -524,7 +524,17 @@ def openai_chat_completions_request( log_event(name="llm_request_sent", attributes=data) chat_completion = client.chat.completions.create(**data) log_event(name="llm_response_received", attributes=chat_completion.model_dump()) - return ChatCompletionResponse(**chat_completion.model_dump()) + response = ChatCompletionResponse(**chat_completion.model_dump()) + + # Override tool_call IDs to ensure cross-provider compatibility (matches streaming path behavior) + # Some models (e.g. 
Kimi via OpenRouter) generate IDs like 'Read:93' which violate Anthropic's pattern + for choice in response.choices: + if choice.message.tool_calls: + for tool_call in choice.message.tool_calls: + if tool_call.id is not None: + tool_call.id = get_tool_call_id() + + return response def prepare_openai_payload(chat_completion_request: ChatCompletionRequest): diff --git a/letta/llm_api/openai_client.py b/letta/llm_api/openai_client.py index ca26c768..0367072f 100644 --- a/letta/llm_api/openai_client.py +++ b/letta/llm_api/openai_client.py @@ -20,6 +20,7 @@ from letta.errors import ( LLMAuthenticationError, LLMBadRequestError, LLMConnectionError, + LLMInsufficientCreditsError, LLMNotFoundError, LLMPermissionDeniedError, LLMRateLimitError, @@ -27,7 +28,8 @@ from letta.errors import ( LLMTimeoutError, LLMUnprocessableEntityError, ) -from letta.llm_api.error_utils import is_context_window_overflow_message +from letta.helpers.json_helpers import sanitize_unicode_surrogates +from letta.llm_api.error_utils import is_context_window_overflow_message, is_insufficient_credits_message from letta.llm_api.helpers import ( add_inner_thoughts_to_functions, convert_response_format_to_responses_api, @@ -39,13 +41,13 @@ from letta.log import get_logger from letta.otel.tracing import trace_method from letta.schemas.agent import AgentType from letta.schemas.embedding_config import EmbeddingConfig +from letta.schemas.enums import ProviderCategory from letta.schemas.letta_message_content import MessageContentType from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message as PydanticMessage from letta.schemas.openai.chat_completion_request import ( ChatCompletionRequest, FunctionCall as ToolFunctionChoiceFunctionCall, - FunctionSchema, Tool as OpenAITool, ToolFunctionChoice, cast_message_to_subtype, @@ -56,7 +58,6 @@ from letta.schemas.openai.chat_completion_response import ( FunctionCall, Message as ChoiceMessage, ToolCall, - UsageStatistics, ) from 
letta.schemas.openai.responses_request import ResponsesRequest from letta.schemas.response_format import JsonSchemaResponseFormat @@ -105,7 +106,7 @@ def accepts_developer_role(model: str) -> bool: See: https://community.openai.com/t/developer-role-not-accepted-for-o1-o1-mini-o3-mini/1110750/7 """ - if is_openai_reasoning_model(model) and "o1-mini" not in model or "o1-preview" in model: + if (is_openai_reasoning_model(model) and "o1-mini" not in model) or "o1-preview" in model: return True else: return False @@ -244,6 +245,64 @@ class OpenAIClient(LLMClientBase): def supports_structured_output(self, llm_config: LLMConfig) -> bool: return supports_structured_output(llm_config) + def _is_openrouter_request(self, llm_config: LLMConfig) -> bool: + return (llm_config.model_endpoint and "openrouter.ai" in llm_config.model_endpoint) or (llm_config.provider_name == "openrouter") + + def _is_true_openai_request(self, llm_config: LLMConfig) -> bool: + if llm_config.model_endpoint_type != "openai": + return False + + if self._is_openrouter_request(llm_config): + return False + + # Keep Letta inference endpoint behavior unchanged. + if llm_config.model_endpoint == LETTA_MODEL_ENDPOINT: + return False + + # If provider_name is explicitly set and not openai, don't apply OpenAI-specific prompt caching fields. + if llm_config.provider_name and llm_config.provider_name != "openai": + return False + + return True + + def _normalize_model_name(self, model: Optional[str]) -> Optional[str]: + if not model: + return None + return model.split("/", 1)[-1] + + def _supports_extended_prompt_cache_retention(self, model: Optional[str]) -> bool: + normalized_model = self._normalize_model_name(model) + if not normalized_model: + return False + + # Per OpenAI docs: extended retention is available on gpt-4.1 and gpt-5 family models. + # gpt-5-mini is excluded (not listed in docs). 
+ return normalized_model == "gpt-4.1" or (normalized_model.startswith("gpt-5") and normalized_model != "gpt-5-mini") + + def _apply_prompt_cache_settings( + self, + llm_config: LLMConfig, + model: Optional[str], + messages: List[PydanticMessage], + request_obj: Any, + ) -> None: + """Apply OpenAI prompt cache settings to the request. + + We intentionally do NOT set prompt_cache_key. OpenAI's default routing + (based on a hash of the first ~256 tokens of the prompt) already provides + good cache affinity for Letta agents, since each agent has a unique system + prompt. Setting an explicit key can disrupt existing warm caches and reduce + hit rates. + + We only set prompt_cache_retention to "24h" for models that support extended + retention, which keeps cached prefixes active longer (up to 24h vs 5-10min). + """ + if not self._is_true_openai_request(llm_config): + return + + if self._supports_extended_prompt_cache_retention(model): + request_obj.prompt_cache_retention = "24h" + @trace_method def build_request_data_responses( self, @@ -384,6 +443,13 @@ class OpenAIClient(LLMClientBase): data.model = "memgpt-openai" + self._apply_prompt_cache_settings( + llm_config=llm_config, + model=model, + messages=messages, + request_obj=data, + ) + request_data = data.model_dump(exclude_unset=True) # print("responses request data", request_data) return request_data @@ -452,13 +518,11 @@ class OpenAIClient(LLMClientBase): model = None # TODO: we may need to extend this to more models using proxy? 
- is_openrouter = (llm_config.model_endpoint and "openrouter.ai" in llm_config.model_endpoint) or ( - llm_config.provider_name == "openrouter" - ) + is_openrouter = self._is_openrouter_request(llm_config) if is_openrouter: try: model = llm_config.handle.split("/", 1)[-1] - except: + except Exception: # don't raise error since this isn't robust against edge cases pass @@ -468,7 +532,12 @@ class OpenAIClient(LLMClientBase): tool_choice = None if tools: # only set tool_choice if tools exist if force_tool_call is not None: - tool_choice = ToolFunctionChoice(type="function", function=ToolFunctionChoiceFunctionCall(name=force_tool_call)) + # OpenRouter proxies to providers that may not support object-format tool_choice + # Use "required" instead which achieves similar effect + if is_openrouter: + tool_choice = "required" + else: + tool_choice = ToolFunctionChoice(type="function", function=ToolFunctionChoiceFunctionCall(name=force_tool_call)) elif requires_subsequent_tool_call: tool_choice = "required" elif self.requires_auto_tool_choice(llm_config) or agent_type == AgentType.letta_v1_agent: @@ -505,6 +574,12 @@ class OpenAIClient(LLMClientBase): if llm_config.frequency_penalty is not None: data.frequency_penalty = llm_config.frequency_penalty + # Add logprobs configuration for RL training + if llm_config.return_logprobs: + data.logprobs = True + if llm_config.top_logprobs is not None: + data.top_logprobs = llm_config.top_logprobs + if tools and supports_parallel_tool_calling(model): data.parallel_tool_calls = False @@ -546,6 +621,13 @@ class OpenAIClient(LLMClientBase): new_tools.append(tool.model_copy(deep=True)) data.tools = new_tools + self._apply_prompt_cache_settings( + llm_config=llm_config, + model=model, + messages=messages, + request_obj=data, + ) + # Note: Tools are already processed by enable_strict_mode() in the workflow/agent code # (temporal_letta_v1_agent_workflow.py or letta_agent_v3.py) before reaching here. 
# enable_strict_mode() handles: strict flag, additionalProperties, required array, nullable fields @@ -564,6 +646,17 @@ class OpenAIClient(LLMClientBase): # If set, then in the backend "medium" thinking is turned on # request_data["reasoning_effort"] = "medium" + # Add OpenRouter reasoning configuration via extra_body + if is_openrouter and llm_config.enable_reasoner: + reasoning_config = {} + if llm_config.reasoning_effort: + reasoning_config["effort"] = llm_config.reasoning_effort + if llm_config.max_reasoning_tokens and llm_config.max_reasoning_tokens > 0: + reasoning_config["max_tokens"] = llm_config.max_reasoning_tokens + if not reasoning_config: + reasoning_config = {"enabled": True} + request_data["extra_body"] = {"reasoning": reasoning_config} + return request_data @trace_method @@ -571,29 +664,51 @@ class OpenAIClient(LLMClientBase): """ Performs underlying synchronous request to OpenAI API and returns raw response dict. """ + # Sanitize Unicode surrogates to prevent encoding errors + request_data = sanitize_unicode_surrogates(request_data) + client = OpenAI(**self._prepare_client_kwargs(llm_config)) # Route based on payload shape: Responses uses 'input', Chat Completions uses 'messages' - if "input" in request_data and "messages" not in request_data: - resp = client.responses.create(**request_data) - return resp.model_dump() - else: - response: ChatCompletion = client.chat.completions.create(**request_data) - return response.model_dump() + try: + if "input" in request_data and "messages" not in request_data: + resp = client.responses.create(**request_data) + return resp.model_dump() + else: + response: ChatCompletion = client.chat.completions.create(**request_data) + return response.model_dump() + except json.JSONDecodeError as e: + logger.error(f"[OpenAI] Failed to parse API response as JSON: {e}") + raise LLMServerError( + message=f"OpenAI API returned invalid JSON response (likely an HTML error page): {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, 
+ details={"json_error": str(e), "error_position": f"line {e.lineno} column {e.colno}"}, + ) @trace_method async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict: """ Performs underlying asynchronous request to OpenAI API and returns raw response dict. """ + # Sanitize Unicode surrogates to prevent encoding errors + request_data = sanitize_unicode_surrogates(request_data) + kwargs = await self._prepare_client_kwargs_async(llm_config) client = AsyncOpenAI(**kwargs) # Route based on payload shape: Responses uses 'input', Chat Completions uses 'messages' - if "input" in request_data and "messages" not in request_data: - resp = await client.responses.create(**request_data) - return resp.model_dump() - else: - response: ChatCompletion = await client.chat.completions.create(**request_data) - return response.model_dump() + try: + if "input" in request_data and "messages" not in request_data: + resp = await client.responses.create(**request_data) + return resp.model_dump() + else: + response: ChatCompletion = await client.chat.completions.create(**request_data) + return response.model_dump() + except json.JSONDecodeError as e: + logger.error(f"[OpenAI] Failed to parse API response as JSON: {e}") + raise LLMServerError( + message=f"OpenAI API returned invalid JSON response (likely an HTML error page): {str(e)}", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"json_error": str(e), "error_position": f"line {e.lineno} column {e.colno}"}, + ) def is_reasoning_model(self, llm_config: LLMConfig) -> bool: return is_openai_reasoning_model(llm_config.model) @@ -669,6 +784,12 @@ class OpenAIClient(LLMClientBase): Converts raw OpenAI response dict into the ChatCompletionResponse Pydantic model. Handles potential extraction of inner thoughts if they were added via kwargs. """ + if isinstance(response_data, str): + raise LLMServerError( + message="LLM endpoint returned a raw string instead of a JSON object. 
This usually indicates the endpoint URL is incorrect or returned an error page.", + code=ErrorCode.INTERNAL_SERVER_ERROR, + details={"raw_response": response_data[:500]}, + ) if "object" in response_data and response_data["object"] == "response": # Map Responses API shape to Chat Completions shape # See example payload in tests/integration_test_send_message_v2.py @@ -696,7 +817,6 @@ class OpenAIClient(LLMClientBase): finish_reason = None # Optionally capture reasoning presence - found_reasoning = False for out in outputs: out_type = (out or {}).get("type") if out_type == "message": @@ -707,7 +827,6 @@ class OpenAIClient(LLMClientBase): if text_val: assistant_text_parts.append(text_val) elif out_type == "reasoning": - found_reasoning = True reasoning_summary_parts = [part.get("text") for part in out.get("summary")] reasoning_content_signature = out.get("encrypted_content") elif out_type == "function_call": @@ -765,12 +884,12 @@ class OpenAIClient(LLMClientBase): ): if "choices" in response_data and len(response_data["choices"]) > 0: choice_data = response_data["choices"][0] - if "message" in choice_data and "reasoning_content" in choice_data["message"]: - reasoning_content = choice_data["message"]["reasoning_content"] - if reasoning_content: - chat_completion_response.choices[0].message.reasoning_content = reasoning_content - - chat_completion_response.choices[0].message.reasoning_content_signature = None + message_data = choice_data.get("message", {}) + # Check for reasoning_content (standard) or reasoning (OpenRouter) + reasoning_content = message_data.get("reasoning_content") or message_data.get("reasoning") + if reasoning_content: + chat_completion_response.choices[0].message.reasoning_content = reasoning_content + chat_completion_response.choices[0].message.reasoning_content_signature = None # Unpack inner thoughts if they were embedded in function arguments if llm_config.put_inner_thoughts_in_kwargs: @@ -789,6 +908,9 @@ class OpenAIClient(LLMClientBase): """ 
Performs underlying asynchronous streaming request to OpenAI and returns the async stream iterator. """ + # Sanitize Unicode surrogates to prevent encoding errors + request_data = sanitize_unicode_surrogates(request_data) + kwargs = await self._prepare_client_kwargs_async(llm_config) client = AsyncOpenAI(**kwargs) @@ -820,6 +942,9 @@ class OpenAIClient(LLMClientBase): """ Performs underlying asynchronous streaming request to OpenAI and returns the async stream iterator. """ + # Sanitize Unicode surrogates to prevent encoding errors + request_data = sanitize_unicode_surrogates(request_data) + kwargs = await self._prepare_client_kwargs_async(llm_config) client = AsyncOpenAI(**kwargs) response_stream: AsyncStream[ResponseStreamEvent] = await client.responses.create(**request_data, stream=True) @@ -958,10 +1083,11 @@ class OpenAIClient(LLMClientBase): return results @trace_method - def handle_llm_error(self, e: Exception) -> Exception: + def handle_llm_error(self, e: Exception, llm_config: Optional[LLMConfig] = None) -> Exception: """ Maps OpenAI-specific errors to common LLMError types. 
""" + is_byok = (llm_config.provider_category == ProviderCategory.byok) if llm_config else None if isinstance(e, openai.APITimeoutError): timeout_duration = getattr(e, "timeout", "unknown") logger.warning(f"[OpenAI] Request timeout after {timeout_duration} seconds: {e}") @@ -971,6 +1097,7 @@ class OpenAIClient(LLMClientBase): details={ "timeout_duration": timeout_duration, "cause": str(e.__cause__) if e.__cause__ else None, + "is_byok": is_byok, }, ) @@ -979,7 +1106,7 @@ class OpenAIClient(LLMClientBase): return LLMConnectionError( message=f"Failed to connect to OpenAI: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) # Handle httpx.RemoteProtocolError which can occur during streaming @@ -990,7 +1117,7 @@ class OpenAIClient(LLMClientBase): return LLMConnectionError( message=f"Connection error during OpenAI streaming: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "is_byok": is_byok}, ) # Handle httpx network errors which can occur during streaming @@ -1000,37 +1127,47 @@ class OpenAIClient(LLMClientBase): return LLMConnectionError( message=f"Network error during OpenAI streaming: {str(e)}", code=ErrorCode.INTERNAL_SERVER_ERROR, - details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__}, + details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__, "is_byok": is_byok}, ) if isinstance(e, openai.RateLimitError): logger.warning(f"[OpenAI] Rate limited (429). Consider backoff. 
Error: {e}") + body_details = e.body if isinstance(e.body, dict) else {"body": e.body} return LLMRateLimitError( message=f"Rate limited by OpenAI: {str(e)}", code=ErrorCode.RATE_LIMIT_EXCEEDED, - details=e.body, # Include body which often has rate limit details + details={**body_details, "is_byok": is_byok}, ) if isinstance(e, openai.BadRequestError): logger.warning(f"[OpenAI] Bad request (400): {str(e)}") - # BadRequestError can signify different issues (e.g., invalid args, context length) - # Check for context_length_exceeded error code in the error body + error_str = str(e) + + if "= 500: error_cls = LLMServerError @@ -1096,11 +1278,12 @@ class OpenAIClient(LLMClientBase): "status_code": e.status_code, "response": str(e.response), "body": e.body, + "is_byok": is_byok, }, ) # Fallback for unexpected errors - return super().handle_llm_error(e) + return super().handle_llm_error(e, llm_config=llm_config) def fill_image_content_in_messages(openai_message_list: List[dict], pydantic_message_list: List[PydanticMessage]) -> List[dict]: diff --git a/letta/llm_api/sglang_native_client.py b/letta/llm_api/sglang_native_client.py new file mode 100644 index 00000000..ade9adc3 --- /dev/null +++ b/letta/llm_api/sglang_native_client.py @@ -0,0 +1,108 @@ +""" +SGLang Native Client for Letta. + +This client uses SGLang's native /generate endpoint instead of the OpenAI-compatible +/v1/chat/completions endpoint. The native endpoint returns token IDs and per-token +logprobs, which are essential for multi-turn RL training. + +The OpenAI-compatible endpoint only returns token strings, not IDs, making it +impossible to accurately reconstruct the token sequence for training. +""" + +from typing import Any, Dict, Optional + +import httpx + +from letta.log import get_logger + +logger = get_logger(__name__) + + +class SGLangNativeClient: + """Client for SGLang's native /generate endpoint. 
+ + Unlike the OpenAI-compatible endpoint, this returns: + - output_ids: List of token IDs + - output_token_logprobs: List of [logprob, token_id, top_logprob] tuples + + This is essential for RL training where we need exact token IDs, not re-tokenized text. + """ + + def __init__(self, base_url: str, api_key: Optional[str] = None): + """ + Initialize the SGLang native client. + + Args: + base_url: Base URL for SGLang server (e.g., http://localhost:30000) + api_key: Optional API key for authentication + """ + # Remove /v1 suffix if present - native endpoint is at root + self.base_url = base_url.rstrip("/") + if self.base_url.endswith("/v1"): + self.base_url = self.base_url[:-3] + self.api_key = api_key + + async def generate( + self, + text: str, + sampling_params: Optional[Dict[str, Any]] = None, + return_logprob: bool = True, + ) -> Dict[str, Any]: + """ + Call SGLang's native /generate endpoint. + + Args: + text: The formatted prompt text (with chat template applied) + sampling_params: Sampling parameters (temperature, max_new_tokens, etc.) + return_logprob: Whether to return logprobs (default True for RL training) + + Returns: + Response dict with: + - text: Generated text + - output_ids: List of token IDs + - output_token_logprobs: List of [logprob, token_id, top_logprob] tuples + - meta_info: Metadata including finish_reason, prompt_tokens, etc. + + Example response: + { + "text": "Hello! How can I help?", + "output_ids": [9707, 0, 2585, 646, 358, 1492, 30], + "output_token_logprobs": [ + [-0.005, 9707, null], + [0.0, 0, null], + ... + ], + "meta_info": { + "finish_reason": {"type": "stop", "matched": 151645}, + "prompt_tokens": 42, + ... 
+ } + } + """ + headers = {"Content-Type": "application/json"} + if self.api_key: + headers["Authorization"] = f"Bearer {self.api_key}" + + payload = { + "text": text, + "sampling_params": sampling_params or {}, + "return_logprob": return_logprob, + } + + async with httpx.AsyncClient(timeout=300.0) as client: + response = await client.post( + f"{self.base_url}/generate", + json=payload, + headers=headers, + ) + response.raise_for_status() + return response.json() + + async def health_check(self) -> bool: + """Check if the SGLang server is healthy.""" + try: + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(f"{self.base_url}/health") + return response.status_code == 200 + except Exception: + return False diff --git a/letta/llm_api/together_client.py b/letta/llm_api/together_client.py index 98ebf768..8117746a 100644 --- a/letta/llm_api/together_client.py +++ b/letta/llm_api/together_client.py @@ -4,6 +4,7 @@ from typing import List from openai import AsyncOpenAI, OpenAI from openai.types.chat.chat_completion import ChatCompletion +from letta.helpers.json_helpers import sanitize_unicode_surrogates from letta.llm_api.openai_client import OpenAIClient from letta.otel.tracing import trace_method from letta.schemas.embedding_config import EmbeddingConfig @@ -34,6 +35,8 @@ class TogetherClient(OpenAIClient): """ Performs underlying asynchronous request to OpenAI API and returns raw response dict. 
""" + request_data = sanitize_unicode_surrogates(request_data) + api_key, _, _ = await self.get_byok_overrides_async(llm_config) if not api_key: diff --git a/letta/llm_api/xai_client.py b/letta/llm_api/xai_client.py index 1085e5a6..07249320 100644 --- a/letta/llm_api/xai_client.py +++ b/letta/llm_api/xai_client.py @@ -5,6 +5,7 @@ from openai import AsyncOpenAI, AsyncStream, OpenAI from openai.types.chat.chat_completion import ChatCompletion from openai.types.chat.chat_completion_chunk import ChatCompletionChunk +from letta.helpers.json_helpers import sanitize_unicode_surrogates from letta.llm_api.openai_client import OpenAIClient from letta.otel.tracing import trace_method from letta.schemas.embedding_config import EmbeddingConfig @@ -59,6 +60,8 @@ class XAIClient(OpenAIClient): """ Performs underlying asynchronous request to OpenAI API and returns raw response dict. """ + request_data = sanitize_unicode_surrogates(request_data) + api_key = model_settings.xai_api_key or os.environ.get("XAI_API_KEY") client = AsyncOpenAI(api_key=api_key, base_url=llm_config.model_endpoint) @@ -70,6 +73,8 @@ class XAIClient(OpenAIClient): """ Performs underlying asynchronous streaming request to OpenAI and returns the async stream iterator. 
""" + request_data = sanitize_unicode_surrogates(request_data) + api_key = model_settings.xai_api_key or os.environ.get("XAI_API_KEY") client = AsyncOpenAI(api_key=api_key, base_url=llm_config.model_endpoint) response_stream: AsyncStream[ChatCompletionChunk] = await client.chat.completions.create( diff --git a/letta/llm_api/zai_client.py b/letta/llm_api/zai_client.py index 9eec79c2..87d577ef 100644 --- a/letta/llm_api/zai_client.py +++ b/letta/llm_api/zai_client.py @@ -1,19 +1,30 @@ -import os from typing import List, Optional from openai import AsyncOpenAI, AsyncStream, OpenAI from openai.types.chat.chat_completion import ChatCompletion from openai.types.chat.chat_completion_chunk import ChatCompletionChunk +from letta.helpers.json_helpers import sanitize_unicode_surrogates from letta.llm_api.openai_client import OpenAIClient from letta.otel.tracing import trace_method from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import AgentType from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message as PydanticMessage +from letta.schemas.openai.chat_completion_response import ChatCompletionResponse from letta.settings import model_settings +def is_zai_reasoning_model(model_name: str) -> bool: + """Check if the model is a ZAI reasoning model (GLM-4.5+).""" + return ( + model_name.startswith("glm-4.5") + or model_name.startswith("glm-4.6") + or model_name.startswith("glm-4.7") + or model_name.startswith("glm-5") + ) + + class ZAIClient(OpenAIClient): """Z.ai (ZhipuAI) client - uses OpenAI-compatible API.""" @@ -23,6 +34,10 @@ class ZAIClient(OpenAIClient): def supports_structured_output(self, llm_config: LLMConfig) -> bool: return False + def is_reasoning_model(self, llm_config: LLMConfig) -> bool: + """Returns True if the model is a ZAI reasoning model (GLM-4.5+).""" + return is_zai_reasoning_model(llm_config.model) + @trace_method def build_request_data( self, @@ -35,6 +50,50 @@ class 
ZAIClient(OpenAIClient): tool_return_truncation_chars: Optional[int] = None, ) -> dict: data = super().build_request_data(agent_type, messages, llm_config, tools, force_tool_call, requires_subsequent_tool_call) + + # Add thinking configuration for ZAI GLM-4.5+ models + # Must explicitly send type: "disabled" when reasoning is off, as GLM-4.7 has thinking on by default + if self.is_reasoning_model(llm_config): + if llm_config.enable_reasoner: + data["extra_body"] = { + "thinking": { + "type": "enabled", + "clear_thinking": False, # Preserved thinking for agents + } + } + else: + data["extra_body"] = { + "thinking": { + "type": "disabled", + } + } + + # Sanitize empty text content — ZAI rejects empty text blocks + if "messages" in data: + for msg in data["messages"]: + content = msg.get("content") if isinstance(msg, dict) else getattr(msg, "content", None) + # String content: replace empty with None (assistant+tool_calls) or "." + if isinstance(content, str) and not content.strip(): + role = msg.get("role") if isinstance(msg, dict) else getattr(msg, "role", None) + has_tool_calls = msg.get("tool_calls") if isinstance(msg, dict) else getattr(msg, "tool_calls", None) + if role == "assistant" and has_tool_calls: + # assistant + tool_calls: null content is valid in OpenAI format + if isinstance(msg, dict): + msg["content"] = None + else: + msg.content = None + else: + if isinstance(msg, dict): + msg["content"] = "." + else: + msg.content = "." + # List content: fix empty text blocks within arrays + elif isinstance(content, list): + for block in content: + if isinstance(block, dict) and block.get("type") == "text": + if not block.get("text", "").strip(): + block["text"] = "." + return data @trace_method @@ -53,6 +112,8 @@ class ZAIClient(OpenAIClient): """ Performs underlying asynchronous request to Z.ai API and returns raw response dict. 
""" + request_data = sanitize_unicode_surrogates(request_data) + api_key = model_settings.zai_api_key client = AsyncOpenAI(api_key=api_key, base_url=llm_config.model_endpoint) @@ -64,6 +125,8 @@ class ZAIClient(OpenAIClient): """ Performs underlying asynchronous streaming request to Z.ai and returns the async stream iterator. """ + request_data = sanitize_unicode_surrogates(request_data) + api_key = model_settings.zai_api_key client = AsyncOpenAI(api_key=api_key, base_url=llm_config.model_endpoint) response_stream: AsyncStream[ChatCompletionChunk] = await client.chat.completions.create( @@ -79,3 +142,39 @@ class ZAIClient(OpenAIClient): response = await client.embeddings.create(model=embedding_config.embedding_model, input=inputs) return [r.embedding for r in response.data] + + @trace_method + async def convert_response_to_chat_completion( + self, + response_data: dict, + input_messages: List[PydanticMessage], + llm_config: LLMConfig, + ) -> ChatCompletionResponse: + """ + Converts raw ZAI response dict into the ChatCompletionResponse Pydantic model. + Handles extraction of reasoning_content from ZAI GLM-4.5+ responses. 
+ """ + # Use parent class conversion first + chat_completion_response = await super().convert_response_to_chat_completion(response_data, input_messages, llm_config) + + # Parse reasoning_content from ZAI responses (similar to OpenAI pattern) + # ZAI returns reasoning_content in delta.reasoning_content (streaming) or message.reasoning_content + if ( + chat_completion_response.choices + and len(chat_completion_response.choices) > 0 + and chat_completion_response.choices[0].message + and not chat_completion_response.choices[0].message.reasoning_content + ): + if "choices" in response_data and len(response_data["choices"]) > 0: + choice_data = response_data["choices"][0] + if "message" in choice_data and "reasoning_content" in choice_data["message"]: + reasoning_content = choice_data["message"]["reasoning_content"] + if reasoning_content: + chat_completion_response.choices[0].message.reasoning_content = reasoning_content + chat_completion_response.choices[0].message.reasoning_content_signature = None + + # If we used a reasoning model, mark that reasoning content was used + if self.is_reasoning_model(llm_config) and llm_config.enable_reasoner: + chat_completion_response.choices[0].message.omitted_reasoning_content = True + + return chat_completion_response diff --git a/letta/local_llm/chat_completion_proxy.py b/letta/local_llm/chat_completion_proxy.py index 1129b125..668938c3 100644 --- a/letta/local_llm/chat_completion_proxy.py +++ b/letta/local_llm/chat_completion_proxy.py @@ -16,7 +16,7 @@ from letta.local_llm.llamacpp.api import get_llamacpp_completion from letta.local_llm.llm_chat_completion_wrappers import simple_summary_wrapper from letta.local_llm.lmstudio.api import get_lmstudio_completion, get_lmstudio_completion_chatcompletions from letta.local_llm.ollama.api import get_ollama_completion -from letta.local_llm.utils import count_tokens, get_available_wrappers +from letta.local_llm.utils import get_available_wrappers from letta.local_llm.vllm.api import 
get_vllm_completion from letta.local_llm.webui.api import get_webui_completion from letta.local_llm.webui.legacy_api import get_webui_completion as get_webui_completion_legacy @@ -177,7 +177,7 @@ def get_chat_completion( raise LocalLLMError( f"Invalid endpoint type {endpoint_type}, please set variable depending on your backend (webui, lmstudio, llamacpp, koboldcpp)" ) - except requests.exceptions.ConnectionError as e: + except requests.exceptions.ConnectionError: raise LocalLLMConnectionError(f"Unable to connect to endpoint {endpoint}") attributes = usage if isinstance(usage, dict) else {"usage": usage} @@ -207,10 +207,12 @@ def get_chat_completion( if usage["prompt_tokens"] is None: printd("usage dict was missing prompt_tokens, computing on-the-fly...") - usage["prompt_tokens"] = count_tokens(prompt) + # Approximate token count: bytes / 4 + usage["prompt_tokens"] = len(prompt.encode("utf-8")) // 4 # NOTE: we should compute on-the-fly anyways since we might have to correct for errors during JSON parsing - usage["completion_tokens"] = count_tokens(json_dumps(chat_completion_result)) + # Approximate token count: bytes / 4 + usage["completion_tokens"] = len(json_dumps(chat_completion_result).encode("utf-8")) // 4 """ if usage["completion_tokens"] is None: printd(f"usage dict was missing completion_tokens, computing on-the-fly...") diff --git a/letta/local_llm/constants.py b/letta/local_llm/constants.py index 19fce8e8..c4973717 100644 --- a/letta/local_llm/constants.py +++ b/letta/local_llm/constants.py @@ -2,7 +2,7 @@ # (settings.py imports from this module indirectly through log.py) # Import this here to avoid circular dependency at module level from letta.local_llm.llm_chat_completion_wrappers.chatml import ChatMLInnerMonologueWrapper -from letta.settings import DEFAULT_WRAPPER_NAME, INNER_THOUGHTS_KWARG +from letta.settings import INNER_THOUGHTS_KWARG DEFAULT_WRAPPER = ChatMLInnerMonologueWrapper INNER_THOUGHTS_KWARG_VERTEX = "thinking" diff --git 
a/letta/local_llm/grammars/gbnf_grammar_generator.py b/letta/local_llm/grammars/gbnf_grammar_generator.py index 536bf8e2..0b35e44f 100644 --- a/letta/local_llm/grammars/gbnf_grammar_generator.py +++ b/letta/local_llm/grammars/gbnf_grammar_generator.py @@ -5,7 +5,7 @@ from copy import copy from enum import Enum from inspect import getdoc, isclass from types import NoneType -from typing import Any, Callable, List, Optional, Tuple, Type, Union, _GenericAlias, get_args, get_origin +from typing import Any, Callable, List, Optional, Tuple, Type, Union, _GenericAlias, get_args, get_origin # type: ignore[attr-defined] from docstring_parser import parse from pydantic import BaseModel, create_model @@ -58,13 +58,13 @@ def map_pydantic_type_to_gbnf(pydantic_type: Type[Any]) -> str: elif isclass(pydantic_type) and issubclass(pydantic_type, BaseModel): return format_model_and_field_name(pydantic_type.__name__) - elif get_origin(pydantic_type) == list: + elif get_origin(pydantic_type) is list: element_type = get_args(pydantic_type)[0] return f"{map_pydantic_type_to_gbnf(element_type)}-list" - elif get_origin(pydantic_type) == set: + elif get_origin(pydantic_type) is set: element_type = get_args(pydantic_type)[0] return f"{map_pydantic_type_to_gbnf(element_type)}-set" - elif get_origin(pydantic_type) == Union: + elif get_origin(pydantic_type) is Union: union_types = get_args(pydantic_type) union_rules = [map_pydantic_type_to_gbnf(ut) for ut in union_types] return f"union-{'-or-'.join(union_rules)}" @@ -73,7 +73,7 @@ def map_pydantic_type_to_gbnf(pydantic_type: Type[Any]) -> str: return f"optional-{map_pydantic_type_to_gbnf(element_type)}" elif isclass(pydantic_type): return f"{PydanticDataType.CUSTOM_CLASS.value}-{format_model_and_field_name(pydantic_type.__name__)}" - elif get_origin(pydantic_type) == dict: + elif get_origin(pydantic_type) is dict: key_type, value_type = get_args(pydantic_type) return 
f"custom-dict-key-type-{format_model_and_field_name(map_pydantic_type_to_gbnf(key_type))}-value-type-{format_model_and_field_name(map_pydantic_type_to_gbnf(value_type))}" else: @@ -299,7 +299,7 @@ def generate_gbnf_rule_for_type( enum_rule = f"{model_name}-{field_name} ::= {' | '.join(enum_values)}" rules.append(enum_rule) gbnf_type, rules = model_name + "-" + field_name, rules - elif get_origin(field_type) == list: # Array + elif get_origin(field_type) is list: # Array element_type = get_args(field_type)[0] element_rule_name, additional_rules = generate_gbnf_rule_for_type( model_name, f"{field_name}-element", element_type, is_optional, processed_models, created_rules @@ -309,7 +309,7 @@ def generate_gbnf_rule_for_type( rules.append(array_rule) gbnf_type, rules = model_name + "-" + field_name, rules - elif get_origin(field_type) == set or field_type == set: # Array + elif get_origin(field_type) is set or field_type is set: # Array element_type = get_args(field_type)[0] element_rule_name, additional_rules = generate_gbnf_rule_for_type( model_name, f"{field_name}-element", element_type, is_optional, processed_models, created_rules @@ -320,7 +320,7 @@ def generate_gbnf_rule_for_type( gbnf_type, rules = model_name + "-" + field_name, rules elif gbnf_type.startswith("custom-class-"): - nested_model_rules, field_types = get_members_structure(field_type, gbnf_type) + nested_model_rules, _field_types = get_members_structure(field_type, gbnf_type) rules.append(nested_model_rules) elif gbnf_type.startswith("custom-dict-"): key_type, value_type = get_args(field_type) @@ -502,15 +502,15 @@ def generate_gbnf_grammar(model: Type[BaseModel], processed_models: set, created model_rule += '"\\n" ws "}"' model_rule += '"\\n" markdown-code-block' has_special_string = True - all_rules = [model_rule] + nested_rules + all_rules = [model_rule, *nested_rules] return all_rules, has_special_string def generate_gbnf_grammar_from_pydantic_models( models: List[Type[BaseModel]], - 
outer_object_name: str = None, - outer_object_content: str = None, + outer_object_name: str | None = None, + outer_object_content: str | None = None, list_of_outputs: bool = False, add_inner_thoughts: bool = False, allow_only_inner_thoughts: bool = False, @@ -704,11 +704,11 @@ def generate_markdown_documentation( # continue if isclass(field_type) and issubclass(field_type, BaseModel): pyd_models.append((field_type, False)) - if get_origin(field_type) == list: + if get_origin(field_type) is list: element_type = get_args(field_type)[0] if isclass(element_type) and issubclass(element_type, BaseModel): pyd_models.append((element_type, False)) - if get_origin(field_type) == Union: + if get_origin(field_type) is Union: element_types = get_args(field_type) for element_type in element_types: if isclass(element_type) and issubclass(element_type, BaseModel): @@ -747,14 +747,14 @@ def generate_field_markdown( field_info = model.model_fields.get(field_name) field_description = field_info.description if field_info and field_info.description else "" - if get_origin(field_type) == list: + if get_origin(field_type) is list: element_type = get_args(field_type)[0] field_text = f"{indent}{field_name} ({field_type.__name__} of {element_type.__name__})" if field_description != "": field_text += ": " else: field_text += "\n" - elif get_origin(field_type) == Union: + elif get_origin(field_type) is Union: element_types = get_args(field_type) types = [] for element_type in element_types: @@ -857,11 +857,11 @@ def generate_text_documentation( for name, field_type in model.__annotations__.items(): # if name == "markdown_code_block": # continue - if get_origin(field_type) == list: + if get_origin(field_type) is list: element_type = get_args(field_type)[0] if isclass(element_type) and issubclass(element_type, BaseModel): pyd_models.append((element_type, False)) - if get_origin(field_type) == Union: + if get_origin(field_type) is Union: element_types = get_args(field_type) for element_type in 
element_types: if isclass(element_type) and issubclass(element_type, BaseModel): @@ -905,14 +905,14 @@ def generate_field_text( field_info = model.model_fields.get(field_name) field_description = field_info.description if field_info and field_info.description else "" - if get_origin(field_type) == list: + if get_origin(field_type) is list: element_type = get_args(field_type)[0] field_text = f"{indent}{field_name} ({format_model_and_field_name(field_type.__name__)} of {format_model_and_field_name(element_type.__name__)})" if field_description != "": field_text += ":\n" else: field_text += "\n" - elif get_origin(field_type) == Union: + elif get_origin(field_type) is Union: element_types = get_args(field_type) types = [] for element_type in element_types: @@ -1015,8 +1015,8 @@ def generate_and_save_gbnf_grammar_and_documentation( pydantic_model_list, grammar_file_path="./generated_grammar.gbnf", documentation_file_path="./generated_grammar_documentation.md", - outer_object_name: str = None, - outer_object_content: str = None, + outer_object_name: str | None = None, + outer_object_content: str | None = None, model_prefix: str = "Output Model", fields_prefix: str = "Output Fields", list_of_outputs: bool = False, @@ -1049,8 +1049,8 @@ def generate_and_save_gbnf_grammar_and_documentation( def generate_gbnf_grammar_and_documentation( pydantic_model_list, - outer_object_name: str = None, - outer_object_content: str = None, + outer_object_name: str | None = None, + outer_object_content: str | None = None, model_prefix: str = "Output Model", fields_prefix: str = "Output Fields", list_of_outputs: bool = False, @@ -1087,8 +1087,8 @@ def generate_gbnf_grammar_and_documentation( def generate_gbnf_grammar_and_documentation_from_dictionaries( dictionaries: List[dict], - outer_object_name: str = None, - outer_object_content: str = None, + outer_object_name: str | None = None, + outer_object_content: str | None = None, model_prefix: str = "Output Model", fields_prefix: str = "Output 
Fields", list_of_outputs: bool = False, diff --git a/letta/local_llm/koboldcpp/api.py b/letta/local_llm/koboldcpp/api.py index e3aee69d..72c3cf06 100644 --- a/letta/local_llm/koboldcpp/api.py +++ b/letta/local_llm/koboldcpp/api.py @@ -1,7 +1,7 @@ from urllib.parse import urljoin from letta.local_llm.settings.settings import get_completions_settings -from letta.local_llm.utils import count_tokens, post_json_auth_request +from letta.local_llm.utils import post_json_auth_request KOBOLDCPP_API_SUFFIX = "/api/v1/generate" @@ -10,7 +10,8 @@ def get_koboldcpp_completion(endpoint, auth_type, auth_key, prompt, context_wind """See https://lite.koboldai.net/koboldcpp_api for API spec""" from letta.utils import printd - prompt_tokens = count_tokens(prompt) + # Approximate token count: bytes / 4 + prompt_tokens = len(prompt.encode("utf-8")) // 4 if prompt_tokens > context_window: raise Exception(f"Request exceeds maximum context length ({prompt_tokens} > {context_window} tokens)") diff --git a/letta/local_llm/llamacpp/api.py b/letta/local_llm/llamacpp/api.py index e5d24eea..9ca7e1f6 100644 --- a/letta/local_llm/llamacpp/api.py +++ b/letta/local_llm/llamacpp/api.py @@ -1,7 +1,7 @@ from urllib.parse import urljoin from letta.local_llm.settings.settings import get_completions_settings -from letta.local_llm.utils import count_tokens, post_json_auth_request +from letta.local_llm.utils import post_json_auth_request LLAMACPP_API_SUFFIX = "/completion" @@ -10,7 +10,8 @@ def get_llamacpp_completion(endpoint, auth_type, auth_key, prompt, context_windo """See https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md for instructions on how to run the LLM web server""" from letta.utils import printd - prompt_tokens = count_tokens(prompt) + # Approximate token count: bytes / 4 + prompt_tokens = len(prompt.encode("utf-8")) // 4 if prompt_tokens > context_window: raise Exception(f"Request exceeds maximum context length ({prompt_tokens} > {context_window} tokens)") diff 
--git a/letta/local_llm/llm_chat_completion_wrappers/airoboros.py b/letta/local_llm/llm_chat_completion_wrappers/airoboros.py index 544d11d4..5a3223fb 100644 --- a/letta/local_llm/llm_chat_completion_wrappers/airoboros.py +++ b/letta/local_llm/llm_chat_completion_wrappers/airoboros.py @@ -130,12 +130,12 @@ class Airoboros21Wrapper(LLMChatCompletionWrapper): content_json = json_loads(message["content"]) content_simple = content_json["message"] prompt += f"\nUSER: {content_simple}" - except: + except Exception: prompt += f"\nUSER: {message['content']}" elif message["role"] == "assistant": prompt += f"\nASSISTANT: {message['content']}" # need to add the function call if there was one - if "function_call" in message and message["function_call"]: + if message.get("function_call"): prompt += f"\n{create_function_call(message['function_call'])}" elif message["role"] in ["function", "tool"]: # TODO find a good way to add this @@ -348,7 +348,7 @@ class Airoboros21InnerMonologueWrapper(Airoboros21Wrapper): content_json = json_loads(message["content"]) content_simple = content_json["message"] prompt += f"\n{user_prefix}: {content_simple}" - except: + except Exception: prompt += f"\n{user_prefix}: {message['content']}" elif message["role"] == "assistant": # Support for AutoGen naming of agents @@ -360,7 +360,7 @@ class Airoboros21InnerMonologueWrapper(Airoboros21Wrapper): prompt += f"\n{assistant_prefix}:" # need to add the function call if there was one inner_thoughts = message["content"] - if "function_call" in message and message["function_call"]: + if message.get("function_call"): prompt += f"\n{create_function_call(message['function_call'], inner_thoughts=inner_thoughts)}" elif message["role"] in ["function", "tool"]: # TODO find a good way to add this diff --git a/letta/local_llm/llm_chat_completion_wrappers/chatml.py b/letta/local_llm/llm_chat_completion_wrappers/chatml.py index 71589959..75c6b411 100644 --- a/letta/local_llm/llm_chat_completion_wrappers/chatml.py +++ 
b/letta/local_llm/llm_chat_completion_wrappers/chatml.py @@ -143,9 +143,9 @@ class ChatMLInnerMonologueWrapper(LLMChatCompletionWrapper): # need to add the function call if there was one inner_thoughts = message["content"] - if "function_call" in message and message["function_call"]: + if message.get("function_call"): prompt += f"\n{self._compile_function_call(message['function_call'], inner_thoughts=inner_thoughts)}" - elif "tool_calls" in message and message["tool_calls"]: + elif message.get("tool_calls"): for tool_call in message["tool_calls"]: prompt += f"\n{self._compile_function_call(tool_call['function'], inner_thoughts=inner_thoughts)}" else: @@ -163,14 +163,14 @@ class ChatMLInnerMonologueWrapper(LLMChatCompletionWrapper): try: user_msg_json = json_loads(message["content"]) user_msg_str = user_msg_json["message"] - except: + except Exception: user_msg_str = message["content"] else: # Otherwise just dump the full json try: user_msg_json = json_loads(message["content"]) user_msg_str = json_dumps(user_msg_json, indent=self.json_indent) - except: + except Exception: user_msg_str = message["content"] prompt += user_msg_str @@ -185,7 +185,7 @@ class ChatMLInnerMonologueWrapper(LLMChatCompletionWrapper): # indent the function replies function_return_dict = json_loads(message["content"]) function_return_str = json_dumps(function_return_dict, indent=0) - except: + except Exception: function_return_str = message["content"] prompt += function_return_str @@ -218,7 +218,7 @@ class ChatMLInnerMonologueWrapper(LLMChatCompletionWrapper): msg_json = json_loads(message["content"]) if msg_json["type"] != "user_message": role_str = "system" - except: + except Exception: pass prompt += f"\n<|im_start|>{role_str}\n{msg_str.strip()}<|im_end|>" diff --git a/letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py b/letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py index 9f53fa83..842eab52 100644 --- 
a/letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +++ b/letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py @@ -141,9 +141,9 @@ class ConfigurableJSONWrapper(LLMChatCompletionWrapper): # need to add the function call if there was one inner_thoughts = message["content"] - if "function_call" in message and message["function_call"]: + if message.get("function_call"): prompt += f"\n{self._compile_function_call(message['function_call'], inner_thoughts=inner_thoughts)}" - elif "tool_calls" in message and message["tool_calls"]: + elif message.get("tool_calls"): for tool_call in message["tool_calls"]: prompt += f"\n{self._compile_function_call(tool_call['function'], inner_thoughts=inner_thoughts)}" else: @@ -161,14 +161,14 @@ class ConfigurableJSONWrapper(LLMChatCompletionWrapper): try: user_msg_json = json_loads(message["content"]) user_msg_str = user_msg_json["message"] - except: + except Exception: user_msg_str = message["content"] else: # Otherwise just dump the full json try: user_msg_json = json_loads(message["content"]) user_msg_str = json_dumps(user_msg_json, indent=self.json_indent) - except: + except Exception: user_msg_str = message["content"] prompt += user_msg_str @@ -183,7 +183,7 @@ class ConfigurableJSONWrapper(LLMChatCompletionWrapper): # indent the function replies function_return_dict = json_loads(message["content"]) function_return_str = json_dumps(function_return_dict, indent=0) - except: + except Exception: function_return_str = message["content"] prompt += function_return_str diff --git a/letta/local_llm/llm_chat_completion_wrappers/dolphin.py b/letta/local_llm/llm_chat_completion_wrappers/dolphin.py index e393d9b1..ff7d05f3 100644 --- a/letta/local_llm/llm_chat_completion_wrappers/dolphin.py +++ b/letta/local_llm/llm_chat_completion_wrappers/dolphin.py @@ -158,7 +158,7 @@ class Dolphin21MistralWrapper(LLMChatCompletionWrapper): content_simple = content_json["message"] prompt += 
f"\n{IM_START_TOKEN}user\n{content_simple}{IM_END_TOKEN}" # prompt += f"\nUSER: {content_simple}" - except: + except Exception: prompt += f"\n{IM_START_TOKEN}user\n{message['content']}{IM_END_TOKEN}" # prompt += f"\nUSER: {message['content']}" elif message["role"] == "assistant": @@ -167,7 +167,7 @@ class Dolphin21MistralWrapper(LLMChatCompletionWrapper): prompt += f"\n{message['content']}" # prompt += f"\nASSISTANT: {message['content']}" # need to add the function call if there was one - if "function_call" in message and message["function_call"]: + if message.get("function_call"): prompt += f"\n{create_function_call(message['function_call'])}" prompt += f"{IM_END_TOKEN}" elif message["role"] in ["function", "tool"]: diff --git a/letta/local_llm/llm_chat_completion_wrappers/llama3.py b/letta/local_llm/llm_chat_completion_wrappers/llama3.py index 12153209..49506d6c 100644 --- a/letta/local_llm/llm_chat_completion_wrappers/llama3.py +++ b/letta/local_llm/llm_chat_completion_wrappers/llama3.py @@ -142,9 +142,9 @@ class LLaMA3InnerMonologueWrapper(LLMChatCompletionWrapper): # need to add the function call if there was one inner_thoughts = message["content"] - if "function_call" in message and message["function_call"]: + if message.get("function_call"): prompt += f"\n{self._compile_function_call(message['function_call'], inner_thoughts=inner_thoughts)}" - elif "tool_calls" in message and message["tool_calls"]: + elif message.get("tool_calls"): for tool_call in message["tool_calls"]: prompt += f"\n{self._compile_function_call(tool_call['function'], inner_thoughts=inner_thoughts)}" else: @@ -162,7 +162,7 @@ class LLaMA3InnerMonologueWrapper(LLMChatCompletionWrapper): try: user_msg_json = json_loads(message["content"]) user_msg_str = user_msg_json["message"] - except: + except Exception: user_msg_str = message["content"] else: # Otherwise just dump the full json @@ -172,7 +172,7 @@ class LLaMA3InnerMonologueWrapper(LLMChatCompletionWrapper): user_msg_json, 
indent=self.json_indent, ) - except: + except Exception: user_msg_str = message["content"] prompt += user_msg_str @@ -190,7 +190,7 @@ class LLaMA3InnerMonologueWrapper(LLMChatCompletionWrapper): function_return_dict, indent=self.json_indent, ) - except: + except Exception: function_return_str = message["content"] prompt += function_return_str @@ -223,7 +223,7 @@ class LLaMA3InnerMonologueWrapper(LLMChatCompletionWrapper): msg_json = json_loads(message["content"]) if msg_json["type"] != "user_message": role_str = "system" - except: + except Exception: pass prompt += f"\n<|start_header_id|>{role_str}<|end_header_id|>\n\n{msg_str.strip()}<|eot_id|>" diff --git a/letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py b/letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py index d20bd2d3..ca77c9ea 100644 --- a/letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +++ b/letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py @@ -101,14 +101,14 @@ class SimpleSummaryWrapper(LLMChatCompletionWrapper): content_json = json_loads(message["content"]) content_simple = content_json["message"] prompt += f"\nUSER: {content_simple}" - except: + except Exception: prompt += f"\nUSER: {message['content']}" elif message["role"] == "assistant": prompt += f"\nASSISTANT: {message['content']}" # need to add the function call if there was one - if "function_call" in message and message["function_call"]: + if message.get("function_call"): prompt += f"\n{create_function_call(message['function_call'])}" - elif "tool_calls" in message and message["tool_calls"]: + elif message.get("tool_calls"): prompt += f"\n{create_function_call(message['tool_calls'][0]['function'])}" elif message["role"] in ["function", "tool"]: # TODO find a good way to add this diff --git a/letta/local_llm/llm_chat_completion_wrappers/zephyr.py b/letta/local_llm/llm_chat_completion_wrappers/zephyr.py index 8ee733aa..186336ee 100644 --- 
a/letta/local_llm/llm_chat_completion_wrappers/zephyr.py +++ b/letta/local_llm/llm_chat_completion_wrappers/zephyr.py @@ -88,7 +88,7 @@ class ZephyrMistralWrapper(LLMChatCompletionWrapper): content_simple = content_json["message"] prompt += f"\n<|user|>\n{content_simple}{IM_END_TOKEN}" # prompt += f"\nUSER: {content_simple}" - except: + except Exception: prompt += f"\n<|user|>\n{message['content']}{IM_END_TOKEN}" # prompt += f"\nUSER: {message['content']}" elif message["role"] == "assistant": @@ -97,7 +97,7 @@ class ZephyrMistralWrapper(LLMChatCompletionWrapper): prompt += f"\n{message['content']}" # prompt += f"\nASSISTANT: {message['content']}" # need to add the function call if there was one - if "function_call" in message and message["function_call"]: + if message.get("function_call"): prompt += f"\n{create_function_call(message['function_call'])}" prompt += f"{IM_END_TOKEN}" elif message["role"] in ["function", "tool"]: @@ -256,7 +256,7 @@ class ZephyrMistralInnerMonologueWrapper(ZephyrMistralWrapper): content_json = json_loads(message["content"]) content_simple = content_json["message"] prompt += f"\n<|user|>\n{content_simple}{IM_END_TOKEN}" - except: + except Exception: prompt += f"\n<|user|>\n{message['content']}{IM_END_TOKEN}" elif message["role"] == "assistant": prompt += "\n<|assistant|>" diff --git a/letta/local_llm/lmstudio/api.py b/letta/local_llm/lmstudio/api.py index dd0debee..155f5b26 100644 --- a/letta/local_llm/lmstudio/api.py +++ b/letta/local_llm/lmstudio/api.py @@ -3,7 +3,6 @@ from urllib.parse import urljoin from letta.local_llm.settings.settings import get_completions_settings from letta.local_llm.utils import post_json_auth_request -from letta.utils import count_tokens LMSTUDIO_API_CHAT_SUFFIX = "/v1/chat/completions" LMSTUDIO_API_COMPLETIONS_SUFFIX = "/v1/completions" @@ -80,7 +79,8 @@ def get_lmstudio_completion(endpoint, auth_type, auth_key, prompt, context_windo """Based on the example for using LM Studio as a backend from 
https://github.com/lmstudio-ai/examples/tree/main/Hello%2C%20world%20-%20OpenAI%20python%20client""" from letta.utils import printd - prompt_tokens = count_tokens(prompt) + # Approximate token count: bytes / 4 + prompt_tokens = len(prompt.encode("utf-8")) // 4 if prompt_tokens > context_window: raise Exception(f"Request exceeds maximum context length ({prompt_tokens} > {context_window} tokens)") diff --git a/letta/local_llm/ollama/api.py b/letta/local_llm/ollama/api.py index 69926a43..4c6508d8 100644 --- a/letta/local_llm/ollama/api.py +++ b/letta/local_llm/ollama/api.py @@ -3,7 +3,6 @@ from urllib.parse import urljoin from letta.errors import LocalLLMError from letta.local_llm.settings.settings import get_completions_settings from letta.local_llm.utils import post_json_auth_request -from letta.utils import count_tokens OLLAMA_API_SUFFIX = "/api/generate" @@ -12,7 +11,8 @@ def get_ollama_completion(endpoint, auth_type, auth_key, model, prompt, context_ """See https://github.com/jmorganca/ollama/blob/main/docs/api.md for instructions on how to run the LLM web server""" from letta.utils import printd - prompt_tokens = count_tokens(prompt) + # Approximate token count: bytes / 4 + prompt_tokens = len(prompt.encode("utf-8")) // 4 if prompt_tokens > context_window: raise Exception(f"Request exceeds maximum context length ({prompt_tokens} > {context_window} tokens)") diff --git a/letta/local_llm/settings/settings.py b/letta/local_llm/settings/settings.py index 3671e30b..18ecb6e7 100644 --- a/letta/local_llm/settings/settings.py +++ b/letta/local_llm/settings/settings.py @@ -46,7 +46,7 @@ def get_completions_settings(defaults="simple") -> dict: with open(settings_file, "r", encoding="utf-8") as file: user_settings = json.load(file) if len(user_settings) > 0: - printd(f"Updating base settings with the following user settings:\n{json_dumps(user_settings, indent=2)}") + printd(f"Updating base settings with the following user settings:\n{json.dumps(user_settings, indent=2)}") 
settings.update(user_settings) else: printd(f"'{settings_file}' was empty, ignoring...") diff --git a/letta/local_llm/utils.py b/letta/local_llm/utils.py index 0bbfcb10..905736c2 100644 --- a/letta/local_llm/utils.py +++ b/letta/local_llm/utils.py @@ -25,15 +25,15 @@ def post_json_auth_request(uri, json_payload, auth_type, auth_key): # Used by OpenAI, together.ai, Mistral AI elif auth_type == "bearer_token": - if auth_key is None: - raise ValueError(f"auth_type is {auth_type}, but auth_key is null") + if not auth_key: + raise ValueError(f"auth_type is {auth_type}, but auth_key is null or empty") headers = {"Content-Type": "application/json", "Authorization": f"Bearer {auth_key}"} response = requests.post(uri, json=json_payload, headers=headers) # Used by OpenAI Azure elif auth_type == "api_key": - if auth_key is None: - raise ValueError(f"auth_type is {auth_type}, but auth_key is null") + if not auth_key: + raise ValueError(f"auth_type is {auth_type}, but auth_key is null or empty") headers = {"Content-Type": "application/json", "api-key": f"{auth_key}"} response = requests.post(uri, json=json_payload, headers=headers) diff --git a/letta/local_llm/vllm/api.py b/letta/local_llm/vllm/api.py index dde863c8..245a176d 100644 --- a/letta/local_llm/vllm/api.py +++ b/letta/local_llm/vllm/api.py @@ -1,7 +1,7 @@ from urllib.parse import urljoin from letta.local_llm.settings.settings import get_completions_settings -from letta.local_llm.utils import count_tokens, post_json_auth_request +from letta.local_llm.utils import post_json_auth_request WEBUI_API_SUFFIX = "/completions" @@ -10,7 +10,8 @@ def get_vllm_completion(endpoint, auth_type, auth_key, model, prompt, context_wi """https://github.com/vllm-project/vllm/blob/main/examples/api_client.py""" from letta.utils import printd - prompt_tokens = count_tokens(prompt) + # Approximate token count: bytes / 4 + prompt_tokens = len(prompt.encode("utf-8")) // 4 if prompt_tokens > context_window: raise Exception(f"Request exceeds 
maximum context length ({prompt_tokens} > {context_window} tokens)") diff --git a/letta/local_llm/webui/api.py b/letta/local_llm/webui/api.py index 7c4a0967..46323dfd 100644 --- a/letta/local_llm/webui/api.py +++ b/letta/local_llm/webui/api.py @@ -1,7 +1,7 @@ from urllib.parse import urljoin from letta.local_llm.settings.settings import get_completions_settings -from letta.local_llm.utils import count_tokens, post_json_auth_request +from letta.local_llm.utils import post_json_auth_request WEBUI_API_SUFFIX = "/v1/completions" @@ -10,7 +10,8 @@ def get_webui_completion(endpoint, auth_type, auth_key, prompt, context_window, """Compatibility for the new OpenAI API: https://github.com/oobabooga/text-generation-webui/wiki/12-%E2%80%90-OpenAI-API#examples""" from letta.utils import printd - prompt_tokens = count_tokens(prompt) + # Approximate token count: bytes / 4 + prompt_tokens = len(prompt.encode("utf-8")) // 4 if prompt_tokens > context_window: raise Exception(f"Request exceeds maximum context length ({prompt_tokens} > {context_window} tokens)") diff --git a/letta/local_llm/webui/legacy_api.py b/letta/local_llm/webui/legacy_api.py index 01403c1f..c8337180 100644 --- a/letta/local_llm/webui/legacy_api.py +++ b/letta/local_llm/webui/legacy_api.py @@ -1,7 +1,7 @@ from urllib.parse import urljoin from letta.local_llm.settings.settings import get_completions_settings -from letta.local_llm.utils import count_tokens, post_json_auth_request +from letta.local_llm.utils import post_json_auth_request WEBUI_API_SUFFIX = "/api/v1/generate" @@ -10,7 +10,8 @@ def get_webui_completion(endpoint, auth_type, auth_key, prompt, context_window, """See https://github.com/oobabooga/text-generation-webui for instructions on how to run the LLM web server""" from letta.utils import printd - prompt_tokens = count_tokens(prompt) + # Approximate token count: bytes / 4 + prompt_tokens = len(prompt.encode("utf-8")) // 4 if prompt_tokens > context_window: raise Exception(f"Request exceeds maximum 
context length ({prompt_tokens} > {context_window} tokens)") diff --git a/letta/main.py b/letta/main.py index 5c156571..204f0c6e 100644 --- a/letta/main.py +++ b/letta/main.py @@ -1,5 +1,3 @@ -import os - import typer from letta.cli.cli import server diff --git a/letta/memory.py b/letta/memory.py deleted file mode 100644 index 1303ac55..00000000 --- a/letta/memory.py +++ /dev/null @@ -1,109 +0,0 @@ -from typing import TYPE_CHECKING, Callable, Dict, List - -from letta.constants import MESSAGE_SUMMARY_REQUEST_ACK -from letta.llm_api.llm_api_tools import create -from letta.llm_api.llm_client import LLMClient -from letta.otel.tracing import trace_method -from letta.prompts.gpt_summarize import SYSTEM as SUMMARY_PROMPT_SYSTEM -from letta.schemas.agent import AgentState -from letta.schemas.enums import MessageRole -from letta.schemas.letta_message_content import TextContent -from letta.schemas.memory import Memory -from letta.schemas.message import Message -from letta.settings import summarizer_settings -from letta.utils import count_tokens, printd - -if TYPE_CHECKING: - from letta.orm import User - - -def get_memory_functions(cls: Memory) -> Dict[str, Callable]: - """Get memory functions for a memory class""" - functions = {} - - # collect base memory functions (should not be included) - base_functions = [] - for func_name in dir(Memory): - funct = getattr(Memory, func_name) - if callable(funct): - base_functions.append(func_name) - - for func_name in dir(cls): - if func_name.startswith("_") or func_name in ["load", "to_dict"]: # skip base functions - continue - if func_name in base_functions: # dont use BaseMemory functions - continue - func = getattr(cls, func_name) - if not callable(func): # not a function - continue - functions[func_name] = func - return functions - - -def _format_summary_history(message_history: List[Message]): - # TODO use existing prompt formatters for this (eg ChatML) - def get_message_text(content): - if content and len(content) == 1 and 
isinstance(content[0], TextContent): - return content[0].text - return "" - - return "\n".join([f"{m.role}: {get_message_text(m.content)}" for m in message_history]) - - -# @trace_method -# def summarize_messages( -# agent_state: AgentState, -# message_sequence_to_summarize: List[Message], -# actor: "User", -# ): -# """Summarize a message sequence using GPT""" -# # we need the context_window -# context_window = agent_state.llm_config.context_window -# -# summary_prompt = SUMMARY_PROMPT_SYSTEM -# summary_input = _format_summary_history(message_sequence_to_summarize) -# summary_input_tkns = count_tokens(summary_input) -# if summary_input_tkns > summarizer_settings.memory_warning_threshold * context_window: -# trunc_ratio = (summarizer_settings.memory_warning_threshold * context_window / summary_input_tkns) * 0.8 # For good measure... -# cutoff = int(len(message_sequence_to_summarize) * trunc_ratio) -# summary_input = str( -# [summarize_messages(agent_state, message_sequence_to_summarize=message_sequence_to_summarize[:cutoff], actor=actor)] -# + message_sequence_to_summarize[cutoff:] -# ) -# -# dummy_agent_id = agent_state.id -# message_sequence = [ -# Message(agent_id=dummy_agent_id, role=MessageRole.system, content=[TextContent(text=summary_prompt)]), -# Message(agent_id=dummy_agent_id, role=MessageRole.assistant, content=[TextContent(text=MESSAGE_SUMMARY_REQUEST_ACK)]), -# Message(agent_id=dummy_agent_id, role=MessageRole.user, content=[TextContent(text=summary_input)]), -# ] -# -# # TODO: We need to eventually have a separate LLM config for the summarizer LLM -# llm_config_no_inner_thoughts = agent_state.llm_config.model_copy(deep=True) -# llm_config_no_inner_thoughts.put_inner_thoughts_in_kwargs = False -# -# llm_client = LLMClient.create( -# provider_type=agent_state.llm_config.model_endpoint_type, -# put_inner_thoughts_first=False, -# actor=actor, -# ) -# # try to use new client, otherwise fallback to old flow -# # TODO: we can just directly call the LLM here? 
-# if llm_client: -# response = llm_client.send_llm_request( -# agent_type=agent_state.agent_type, -# messages=message_sequence, -# llm_config=llm_config_no_inner_thoughts, -# ) -# else: -# response = create( -# llm_config=llm_config_no_inner_thoughts, -# user_id=agent_state.created_by_id, -# messages=message_sequence, -# stream=False, -# ) -# -# printd(f"summarize_messages gpt reply: {response.choices[0]}") -# reply = response.choices[0].message.content -# return reply -# diff --git a/letta/model_specs/model_prices_and_context_window.json b/letta/model_specs/model_prices_and_context_window.json index e4081310..2ce2e366 100644 --- a/letta/model_specs/model_prices_and_context_window.json +++ b/letta/model_specs/model_prices_and_context_window.json @@ -330,6 +330,25 @@ "supports_video_input": true, "supports_vision": true }, + "amazon.nova-2-pro-preview-20251202-v1:0": { + "cache_read_input_token_cost": 5.46875e-7, + "input_cost_per_token": 2.1875e-6, + "input_cost_per_image_token": 2.1875e-6, + "input_cost_per_audio_token": 2.1875e-6, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.75e-5, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_video_input": true, + "supports_vision": true + }, "apac.amazon.nova-2-lite-v1:0": { "cache_read_input_token_cost": 8.25e-8, "input_cost_per_token": 3.3e-7, @@ -347,6 +366,25 @@ "supports_video_input": true, "supports_vision": true }, + "apac.amazon.nova-2-pro-preview-20251202-v1:0": { + "cache_read_input_token_cost": 5.46875e-7, + "input_cost_per_token": 2.1875e-6, + "input_cost_per_image_token": 2.1875e-6, + "input_cost_per_audio_token": 2.1875e-6, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + 
"mode": "chat", + "output_cost_per_token": 1.75e-5, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_video_input": true, + "supports_vision": true + }, "eu.amazon.nova-2-lite-v1:0": { "cache_read_input_token_cost": 8.25e-8, "input_cost_per_token": 3.3e-7, @@ -364,6 +402,25 @@ "supports_video_input": true, "supports_vision": true }, + "eu.amazon.nova-2-pro-preview-20251202-v1:0": { + "cache_read_input_token_cost": 5.46875e-7, + "input_cost_per_token": 2.1875e-6, + "input_cost_per_image_token": 2.1875e-6, + "input_cost_per_audio_token": 2.1875e-6, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.75e-5, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_video_input": true, + "supports_vision": true + }, "us.amazon.nova-2-lite-v1:0": { "cache_read_input_token_cost": 8.25e-8, "input_cost_per_token": 3.3e-7, @@ -381,6 +438,25 @@ "supports_video_input": true, "supports_vision": true }, + "us.amazon.nova-2-pro-preview-20251202-v1:0": { + "cache_read_input_token_cost": 5.46875e-7, + "input_cost_per_token": 2.1875e-6, + "input_cost_per_image_token": 2.1875e-6, + "input_cost_per_audio_token": 2.1875e-6, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.75e-5, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_video_input": true, + "supports_vision": true + }, "amazon.nova-2-multimodal-embeddings-v1:0": { "litellm_provider": "bedrock", 
"max_input_tokens": 8172, @@ -644,12 +720,13 @@ "supports_response_schema": true, "supports_tool_choice": true, "supports_vision": true, - "tool_use_system_prompt_tokens": 346 + "tool_use_system_prompt_tokens": 346, + "supports_native_streaming": true }, "anthropic.claude-3-5-sonnet-20240620-v1:0": { "input_cost_per_token": 3e-6, "litellm_provider": "bedrock", - "max_input_tokens": 200000, + "max_input_tokens": 1000000, "max_output_tokens": 4096, "max_tokens": 4096, "mode": "chat", @@ -658,14 +735,22 @@ "supports_pdf_input": true, "supports_response_schema": true, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "input_cost_per_token_above_200k_tokens": 6e-6, + "output_cost_per_token_above_200k_tokens": 3e-5, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-6, + "cache_read_input_token_cost_above_200k_tokens": 6e-7, + "cache_creation_input_token_cost_above_1hr": 7.5e-6, + "cache_creation_input_token_cost_above_1hr_above_200k_tokens": 1.5e-5, + "cache_creation_input_token_cost": 3.75e-6, + "cache_read_input_token_cost": 3e-7 }, "anthropic.claude-3-5-sonnet-20241022-v2:0": { "cache_creation_input_token_cost": 3.75e-6, "cache_read_input_token_cost": 3e-7, "input_cost_per_token": 3e-6, "litellm_provider": "bedrock", - "max_input_tokens": 200000, + "max_input_tokens": 1000000, "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", @@ -677,7 +762,13 @@ "supports_prompt_caching": true, "supports_response_schema": true, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "input_cost_per_token_above_200k_tokens": 6e-6, + "output_cost_per_token_above_200k_tokens": 3e-5, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-6, + "cache_read_input_token_cost_above_200k_tokens": 6e-7, + "cache_creation_input_token_cost_above_1hr": 7.5e-6, + "cache_creation_input_token_cost_above_1hr_above_200k_tokens": 1.5e-5 }, "anthropic.claude-3-7-sonnet-20240620-v1:0": { 
"cache_creation_input_token_cost": 4.5e-6, @@ -1281,6 +1372,9 @@ "supports_function_calling": true }, "azure_ai/claude-haiku-4-5": { + "cache_creation_input_token_cost": 1.25e-6, + "cache_creation_input_token_cost_above_1hr": 2e-6, + "cache_read_input_token_cost": 1e-7, "input_cost_per_token": 1e-6, "litellm_provider": "azure_ai", "max_input_tokens": 200000, @@ -1299,6 +1393,9 @@ "supports_vision": true }, "azure_ai/claude-opus-4-5": { + "cache_creation_input_token_cost": 6.25e-6, + "cache_creation_input_token_cost_above_1hr": 1e-5, + "cache_read_input_token_cost": 5e-7, "input_cost_per_token": 5e-6, "litellm_provider": "azure_ai", "max_input_tokens": 200000, @@ -1317,6 +1414,9 @@ "supports_vision": true }, "azure_ai/claude-opus-4-1": { + "cache_creation_input_token_cost": 1.875e-5, + "cache_creation_input_token_cost_above_1hr": 3e-5, + "cache_read_input_token_cost": 1.5e-6, "input_cost_per_token": 1.5e-5, "litellm_provider": "azure_ai", "max_input_tokens": 200000, @@ -1335,6 +1435,9 @@ "supports_vision": true }, "azure_ai/claude-sonnet-4-5": { + "cache_creation_input_token_cost": 3.75e-6, + "cache_creation_input_token_cost_above_1hr": 6e-6, + "cache_read_input_token_cost": 3e-7, "input_cost_per_token": 3e-6, "litellm_provider": "azure_ai", "max_input_tokens": 200000, @@ -1391,6 +1494,14 @@ "supports_response_schema": true, "supports_tool_choice": true }, + "azure_ai/model_router": { + "input_cost_per_token": 1.4e-7, + "output_cost_per_token": 0, + "litellm_provider": "azure_ai", + "mode": "chat", + "source": "https://azure.microsoft.com/en-us/pricing/details/ai-services/", + "comment": "Flat cost of $0.14 per M input tokens for Azure AI Foundry Model Router infrastructure. 
Use pattern: azure_ai/model_router/ where deployment-name is your Azure deployment (e.g., azure-model-router)" + }, "azure/eu/gpt-4o-2024-08-06": { "deprecation_date": "2026-02-27", "cache_read_input_token_cost": 1.375e-6, @@ -2861,7 +2972,7 @@ "supports_reasoning": true, "supports_response_schema": true, "supports_system_messages": true, - "supports_tool_choice": false, + "supports_tool_choice": true, "supports_vision": true }, "azure/gpt-5-chat-latest": { @@ -2888,7 +2999,7 @@ "supports_reasoning": true, "supports_response_schema": true, "supports_system_messages": true, - "supports_tool_choice": false, + "supports_tool_choice": true, "supports_vision": true }, "azure/gpt-5-codex": { @@ -3281,12 +3392,12 @@ "cache_read_input_token_cost": 1.75e-7, "input_cost_per_token": 1.75e-6, "litellm_provider": "azure", - "max_input_tokens": 128000, - "max_output_tokens": 16384, - "max_tokens": 16384, - "mode": "chat", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", "output_cost_per_token": 1.4e-5, - "supported_endpoints": ["/v1/chat/completions", "/v1/responses"], + "supported_endpoints": ["/v1/responses"], "supported_modalities": ["text", "image"], "supported_output_modalities": ["text"], "supports_function_calling": true, @@ -6075,13 +6186,13 @@ "supports_tool_choice": true }, "cerebras/gpt-oss-120b": { - "input_cost_per_token": 2.5e-7, + "input_cost_per_token": 3.5e-7, "litellm_provider": "cerebras", "max_input_tokens": 131072, "max_output_tokens": 32768, "max_tokens": 32768, "mode": "chat", - "output_cost_per_token": 6.9e-7, + "output_cost_per_token": 7.5e-7, "source": "https://www.cerebras.ai/blog/openai-gpt-oss-120b-runs-fastest-on-cerebras", "supports_function_calling": true, "supports_parallel_function_calling": true, @@ -6099,6 +6210,7 @@ "output_cost_per_token": 8e-7, "source": "https://inference-docs.cerebras.ai/support/pricing", "supports_function_calling": true, + "supports_reasoning": true, 
"supports_tool_choice": true }, "cerebras/zai-glm-4.6": { @@ -9050,6 +9162,7 @@ "supports_tool_choice": true }, "deepinfra/google/gemini-2.0-flash-001": { + "deprecation_date": "2026-03-31", "max_tokens": 1000000, "max_input_tokens": 1000000, "max_output_tokens": 1000000, @@ -9493,6 +9606,48 @@ "mode": "completion", "output_cost_per_token": 5e-7 }, + "deepseek-v3-2-251201": { + "input_cost_per_token": 0.0, + "litellm_provider": "volcengine", + "max_input_tokens": 98304, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "glm-4-7-251222": { + "input_cost_per_token": 0.0, + "litellm_provider": "volcengine", + "max_input_tokens": 204800, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "kimi-k2-thinking-251104": { + "input_cost_per_token": 0.0, + "litellm_provider": "volcengine", + "max_input_tokens": 229376, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, "doubao-embedding": { "input_cost_per_token": 0.0, "litellm_provider": "volcengine", @@ -11303,6 +11458,7 @@ }, "gemini-2.0-flash": { "cache_read_input_token_cost": 2.5e-8, + "deprecation_date": "2026-03-31", "input_cost_per_audio_token": 7e-7, "input_cost_per_token": 1e-7, "litellm_provider": "vertex_ai-language-models", @@ -11334,7 +11490,7 @@ }, "gemini-2.0-flash-001": { "cache_read_input_token_cost": 3.75e-8, - "deprecation_date": 
"2026-02-05", + "deprecation_date": "2026-03-31", "input_cost_per_audio_token": 1e-6, "input_cost_per_token": 1.5e-7, "litellm_provider": "vertex_ai-language-models", @@ -11404,6 +11560,7 @@ }, "gemini-2.0-flash-lite": { "cache_read_input_token_cost": 1.875e-8, + "deprecation_date": "2026-03-31", "input_cost_per_audio_token": 7.5e-8, "input_cost_per_token": 7.5e-8, "litellm_provider": "vertex_ai-language-models", @@ -11432,7 +11589,7 @@ }, "gemini-2.0-flash-lite-001": { "cache_read_input_token_cost": 1.875e-8, - "deprecation_date": "2026-02-25", + "deprecation_date": "2026-03-31", "input_cost_per_audio_token": 7.5e-8, "input_cost_per_token": 7.5e-8, "litellm_provider": "vertex_ai-language-models", @@ -11793,9 +11950,37 @@ "supports_vision": true, "supports_web_search": true }, + "deep-research-pro-preview-12-2025": { + "input_cost_per_image": 0.0011, + "input_cost_per_token": 2e-6, + "input_cost_per_token_batches": 1e-6, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 65536, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "image_generation", + "output_cost_per_image": 0.134, + "output_cost_per_image_token": 0.00012, + "output_cost_per_token": 1.2e-5, + "output_cost_per_token_batches": 6e-6, + "source": "https://ai.google.dev/gemini-api/docs/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": ["text", "image"], + "supported_output_modalities": ["text", "image"], + "supports_function_calling": false, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_vision": true, + "supports_web_search": true + }, "gemini-2.5-flash-lite": { - "cache_read_input_token_cost": 2.5e-8, - "input_cost_per_audio_token": 5e-7, + "cache_read_input_token_cost": 1e-8, + "input_cost_per_audio_token": 3e-7, "input_cost_per_token": 1e-7, "litellm_provider": "vertex_ai-language-models", "max_audio_length_hours": 
8.4, @@ -11832,7 +12017,7 @@ "supports_web_search": true }, "gemini-2.5-flash-lite-preview-09-2025": { - "cache_read_input_token_cost": 2.5e-8, + "cache_read_input_token_cost": 1e-8, "input_cost_per_audio_token": 3e-7, "input_cost_per_token": 1e-7, "litellm_provider": "vertex_ai-language-models", @@ -12166,7 +12351,8 @@ "supports_tool_choice": true, "supports_video_input": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_native_streaming": true }, "vertex_ai/gemini-3-pro-preview": { "cache_read_input_token_cost": 2e-7, @@ -12207,7 +12393,8 @@ "supports_tool_choice": true, "supports_video_input": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_native_streaming": true }, "vertex_ai/gemini-3-flash-preview": { "cache_read_input_token_cost": 5e-8, @@ -12243,7 +12430,8 @@ "supports_tool_choice": true, "supports_video_input": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_native_streaming": true }, "gemini-2.5-pro-exp-03-25": { "cache_read_input_token_cost": 1.25e-7, @@ -12431,6 +12619,59 @@ "supports_vision": true, "supports_web_search": true }, + "gemini-robotics-er-1.5-preview": { + "cache_read_input_token_cost": 0, + "input_cost_per_token": 3e-7, + "input_cost_per_audio_token": 1e-6, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_tokens": 65535, + "mode": "chat", + "output_cost_per_token": 2.5e-6, + "output_cost_per_reasoning_token": 2.5e-6, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-robotics-er-1-5-preview", + "supported_endpoints": ["/v1/chat/completions", "/v1/completions"], + "supported_modalities": ["text", "image", "video", "audio"], + "supported_output_modalities": ["text"], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + 
"supports_prompt_caching": false, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true + }, + "gemini/gemini-robotics-er-1.5-preview": { + "cache_read_input_token_cost": 0, + "input_cost_per_token": 3e-7, + "input_cost_per_audio_token": 1e-6, + "litellm_provider": "gemini", + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_tokens": 65535, + "mode": "chat", + "output_cost_per_token": 2.5e-6, + "output_cost_per_reasoning_token": 2.5e-6, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-robotics-er-1-5-preview", + "supported_endpoints": ["/v1/chat/completions", "/v1/completions"], + "supported_modalities": ["text", "image", "video", "audio"], + "supported_output_modalities": ["text"], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, "gemini-2.5-computer-use-preview-10-2025": { "input_cost_per_token": 1.25e-6, "input_cost_per_token_above_200k_tokens": 2.5e-6, @@ -12879,6 +13120,7 @@ }, "gemini/gemini-2.0-flash": { "cache_read_input_token_cost": 2.5e-8, + "deprecation_date": "2026-03-31", "input_cost_per_audio_token": 7e-7, "input_cost_per_token": 1e-7, "litellm_provider": "gemini", @@ -12911,6 +13153,7 @@ }, "gemini/gemini-2.0-flash-001": { "cache_read_input_token_cost": 2.5e-8, + "deprecation_date": "2026-03-31", "input_cost_per_audio_token": 7e-7, "input_cost_per_token": 1e-7, "litellm_provider": "gemini", @@ -12982,6 +13225,7 @@ }, "gemini/gemini-2.0-flash-lite": { "cache_read_input_token_cost": 1.875e-8, + "deprecation_date": "2026-03-31", "input_cost_per_audio_token": 
7.5e-8, "input_cost_per_token": 7.5e-8, "litellm_provider": "gemini", @@ -13390,9 +13634,39 @@ "supports_vision": true, "supports_web_search": true }, + "gemini/deep-research-pro-preview-12-2025": { + "input_cost_per_image": 0.0011, + "input_cost_per_token": 2e-6, + "input_cost_per_token_batches": 1e-6, + "litellm_provider": "gemini", + "max_input_tokens": 65536, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "image_generation", + "output_cost_per_image": 0.134, + "output_cost_per_image_token": 0.00012, + "output_cost_per_token": 1.2e-5, + "rpm": 1000, + "tpm": 4000000, + "output_cost_per_token_batches": 6e-6, + "source": "https://ai.google.dev/gemini-api/docs/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": ["text", "image"], + "supported_output_modalities": ["text", "image"], + "supports_function_calling": false, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_vision": true, + "supports_web_search": true + }, "gemini/gemini-2.5-flash-lite": { - "cache_read_input_token_cost": 2.5e-8, - "input_cost_per_audio_token": 5e-7, + "cache_read_input_token_cost": 1e-8, + "input_cost_per_audio_token": 3e-7, "input_cost_per_token": 1e-7, "litellm_provider": "gemini", "max_audio_length_hours": 8.4, @@ -13431,7 +13705,7 @@ "tpm": 250000 }, "gemini/gemini-2.5-flash-lite-preview-09-2025": { - "cache_read_input_token_cost": 2.5e-8, + "cache_read_input_token_cost": 1e-8, "input_cost_per_audio_token": 3e-7, "input_cost_per_token": 1e-7, "litellm_provider": "gemini", @@ -13835,6 +14109,48 @@ "supports_web_search": true, "tpm": 800000 }, + "gemini/gemini-3.1-pro-preview": { + "cache_read_input_token_cost": 2e-7, + "cache_read_input_token_cost_above_200k_tokens": 4e-7, + "input_cost_per_token": 2e-6, + "input_cost_per_token_above_200k_tokens": 4e-6, + "input_cost_per_token_batches": 1e-6, + "litellm_provider": "gemini", + 
"max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1.2e-5, + "output_cost_per_token_above_200k_tokens": 1.8e-5, + "output_cost_per_token_batches": 6e-6, + "rpm": 2000, + "source": "https://ai.google.dev/pricing/gemini-3", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": ["text", "image", "audio", "video"], + "supported_output_modalities": ["text"], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 800000 + }, "gemini/gemini-3-flash-preview": { "cache_read_input_token_cost": 5e-8, "input_cost_per_audio_token": 1e-6, @@ -13873,6 +14189,7 @@ "supports_url_context": true, "supports_vision": true, "supports_web_search": true, + "supports_native_streaming": true, "tpm": 800000 }, "gemini-3-flash-preview": { @@ -13911,7 +14228,8 @@ "supports_tool_choice": true, "supports_url_context": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_native_streaming": true }, "gemini/gemini-2.5-pro-exp-03-25": { "cache_read_input_token_cost": 0.0, @@ -14752,6 +15070,181 @@ "output_cost_per_token": 0.0, "output_vector_size": 2560 }, + "gmi/anthropic/claude-opus-4.5": { + "input_cost_per_token": 5e-6, + "litellm_provider": "gmi", + "max_input_tokens": 409600, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 2.5e-5, + "supports_function_calling": true, + 
"supports_vision": true + }, + "gmi/anthropic/claude-sonnet-4.5": { + "input_cost_per_token": 3e-6, + "litellm_provider": "gmi", + "max_input_tokens": 409600, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 1.5e-5, + "supports_function_calling": true, + "supports_vision": true + }, + "gmi/anthropic/claude-sonnet-4": { + "input_cost_per_token": 3e-6, + "litellm_provider": "gmi", + "max_input_tokens": 409600, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 1.5e-5, + "supports_function_calling": true, + "supports_vision": true + }, + "gmi/anthropic/claude-opus-4": { + "input_cost_per_token": 1.5e-5, + "litellm_provider": "gmi", + "max_input_tokens": 409600, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-5, + "supports_function_calling": true, + "supports_vision": true + }, + "gmi/openai/gpt-5.2": { + "input_cost_per_token": 1.75e-6, + "litellm_provider": "gmi", + "max_input_tokens": 409600, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 1.4e-5, + "supports_function_calling": true + }, + "gmi/openai/gpt-5.1": { + "input_cost_per_token": 1.25e-6, + "litellm_provider": "gmi", + "max_input_tokens": 409600, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 1e-5, + "supports_function_calling": true + }, + "gmi/openai/gpt-5": { + "input_cost_per_token": 1.25e-6, + "litellm_provider": "gmi", + "max_input_tokens": 409600, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 1e-5, + "supports_function_calling": true + }, + "gmi/openai/gpt-4o": { + "input_cost_per_token": 2.5e-6, + "litellm_provider": "gmi", + "max_input_tokens": 131072, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-5, + "supports_function_calling": true, + 
"supports_vision": true + }, + "gmi/openai/gpt-4o-mini": { + "input_cost_per_token": 1.5e-7, + "litellm_provider": "gmi", + "max_input_tokens": 131072, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6e-7, + "supports_function_calling": true, + "supports_vision": true + }, + "gmi/deepseek-ai/DeepSeek-V3.2": { + "input_cost_per_token": 2.8e-7, + "litellm_provider": "gmi", + "max_input_tokens": 163840, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 4e-7, + "supports_function_calling": true + }, + "gmi/deepseek-ai/DeepSeek-V3-0324": { + "input_cost_per_token": 2.8e-7, + "litellm_provider": "gmi", + "max_input_tokens": 163840, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 8.8e-7, + "supports_function_calling": true + }, + "gmi/google/gemini-3-pro-preview": { + "input_cost_per_token": 2e-6, + "litellm_provider": "gmi", + "max_input_tokens": 1048576, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 1.2e-5, + "supports_function_calling": true, + "supports_vision": true + }, + "gmi/google/gemini-3-flash-preview": { + "input_cost_per_token": 5e-7, + "litellm_provider": "gmi", + "max_input_tokens": 1048576, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 3e-6, + "supports_function_calling": true, + "supports_vision": true + }, + "gmi/moonshotai/Kimi-K2-Thinking": { + "input_cost_per_token": 8e-7, + "litellm_provider": "gmi", + "max_input_tokens": 262144, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.2e-6 + }, + "gmi/MiniMaxAI/MiniMax-M2.1": { + "input_cost_per_token": 3e-7, + "litellm_provider": "gmi", + "max_input_tokens": 196608, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.2e-6 + }, + 
"gmi/Qwen/Qwen3-VL-235B-A22B-Instruct-FP8": { + "input_cost_per_token": 3e-7, + "litellm_provider": "gmi", + "max_input_tokens": 262144, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.4e-6, + "supports_vision": true + }, + "gmi/zai-org/GLM-4.7-FP8": { + "input_cost_per_token": 4e-7, + "litellm_provider": "gmi", + "max_input_tokens": 202752, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 2e-6 + }, "google.gemma-3-12b-it": { "input_cost_per_token": 9e-8, "litellm_provider": "bedrock_converse", @@ -15491,14 +15984,14 @@ "supports_vision": true }, "gpt-4o-audio-preview": { - "input_cost_per_audio_token": 0.0001, + "input_cost_per_audio_token": 4e-5, "input_cost_per_token": 2.5e-6, "litellm_provider": "openai", "max_input_tokens": 128000, "max_output_tokens": 16384, "max_tokens": 16384, "mode": "chat", - "output_cost_per_audio_token": 0.0002, + "output_cost_per_audio_token": 8e-5, "output_cost_per_token": 1e-5, "supports_audio_input": true, "supports_audio_output": true, @@ -15508,14 +16001,14 @@ "supports_tool_choice": true }, "gpt-4o-audio-preview-2024-10-01": { - "input_cost_per_audio_token": 0.0001, + "input_cost_per_audio_token": 4e-5, "input_cost_per_token": 2.5e-6, "litellm_provider": "openai", "max_input_tokens": 128000, "max_output_tokens": 16384, "max_tokens": 16384, "mode": "chat", - "output_cost_per_audio_token": 0.0002, + "output_cost_per_audio_token": 8e-5, "output_cost_per_token": 1e-5, "supports_audio_input": true, "supports_audio_output": true, @@ -15558,6 +16051,156 @@ "supports_system_messages": true, "supports_tool_choice": true }, + "gpt-audio": { + "input_cost_per_audio_token": 3.2e-5, + "input_cost_per_token": 2.5e-6, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 6.4e-5, + "output_cost_per_token": 1e-5, + "supported_endpoints": 
[ + "/v1/chat/completions", + "/v1/responses", + "/v1/realtime", + "/v1/batch" + ], + "supported_modalities": ["text", "audio"], + "supported_output_modalities": ["text", "audio"], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": false, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "gpt-audio-2025-08-28": { + "input_cost_per_audio_token": 3.2e-5, + "input_cost_per_token": 2.5e-6, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 6.4e-5, + "output_cost_per_token": 1e-5, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses", + "/v1/realtime", + "/v1/batch" + ], + "supported_modalities": ["text", "audio"], + "supported_output_modalities": ["text", "audio"], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": false, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "gpt-audio-mini": { + "input_cost_per_audio_token": 1e-5, + "input_cost_per_token": 6e-7, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 2e-5, + "output_cost_per_token": 2.4e-6, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses", + "/v1/realtime", + "/v1/batch" + ], + "supported_modalities": ["text", "audio"], + "supported_output_modalities": ["text", "audio"], + 
"supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": false, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "gpt-audio-mini-2025-10-06": { + "input_cost_per_audio_token": 1e-5, + "input_cost_per_token": 6e-7, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 2e-5, + "output_cost_per_token": 2.4e-6, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses", + "/v1/realtime", + "/v1/batch" + ], + "supported_modalities": ["text", "audio"], + "supported_output_modalities": ["text", "audio"], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": false, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "gpt-audio-mini-2025-12-15": { + "input_cost_per_audio_token": 1e-5, + "input_cost_per_token": 6e-7, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 2e-5, + "output_cost_per_token": 2.4e-6, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses", + "/v1/realtime", + "/v1/batch" + ], + "supported_modalities": ["text", "audio"], + "supported_output_modalities": ["text", "audio"], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + 
"supports_prompt_caching": false, + "supports_reasoning": false, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, "gpt-4o-mini": { "cache_read_input_token_cost": 7.5e-8, "cache_read_input_token_cost_priority": 1.25e-7, @@ -16632,7 +17275,7 @@ "input_cost_per_token": 1.75e-6, "input_cost_per_token_priority": 3.5e-6, "litellm_provider": "openai", - "max_input_tokens": 400000, + "max_input_tokens": 272000, "max_output_tokens": 128000, "max_tokens": 128000, "mode": "responses", @@ -18392,6 +19035,7 @@ "supports_function_calling": true, "supports_tool_choice": true, "supports_prompt_caching": true, + "supports_reasoning": true, "supports_system_messages": true, "max_input_tokens": 1000000, "max_output_tokens": 8192 @@ -18406,6 +19050,7 @@ "supports_function_calling": true, "supports_tool_choice": true, "supports_prompt_caching": true, + "supports_reasoning": true, "supports_system_messages": true, "max_input_tokens": 1000000, "max_output_tokens": 8192 @@ -18420,6 +19065,7 @@ "supports_function_calling": true, "supports_tool_choice": true, "supports_prompt_caching": true, + "supports_reasoning": true, "supports_system_messages": true, "max_input_tokens": 200000, "max_output_tokens": 8192 @@ -19111,6 +19757,20 @@ "supports_tool_choice": true, "supports_web_search": true }, + "moonshot/kimi-k2.5": { + "cache_read_input_token_cost": 1e-7, + "input_cost_per_token": 6e-7, + "litellm_provider": "moonshot", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 3e-6, + "source": "https://platform.moonshot.ai/docs/pricing/chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, "moonshot/kimi-latest": { "cache_read_input_token_cost": 1.5e-7, "input_cost_per_token": 2e-6, @@ -20925,6 +21585,7 @@ "supports_tool_choice": true }, "openrouter/google/gemini-2.0-flash-001": { 
+ "deprecation_date": "2026-03-31", "input_cost_per_audio_token": 7e-7, "input_cost_per_token": 1e-7, "litellm_provider": "openrouter", @@ -21207,7 +21868,7 @@ "mode": "chat", "output_cost_per_token": 1.02e-6, "supports_function_calling": true, - "supports_prompt_caching": false, + "supports_prompt_caching": true, "supports_reasoning": true, "supports_tool_choice": true }, @@ -21343,6 +22004,20 @@ "output_cost_per_token": 6.5e-7, "supports_tool_choice": true }, + "openrouter/moonshotai/kimi-k2.5": { + "cache_read_input_token_cost": 1e-7, + "input_cost_per_token": 6e-7, + "litellm_provider": "openrouter", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 3e-6, + "source": "https://openrouter.ai/moonshotai/kimi-k2.5", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, "openrouter/nousresearch/nous-hermes-llama2-13b": { "input_cost_per_token": 2e-7, "litellm_provider": "openrouter", @@ -21546,11 +22221,12 @@ "cache_read_input_token_cost": 1.75e-7, "input_cost_per_token": 1.75e-6, "litellm_provider": "openrouter", - "max_input_tokens": 400000, + "max_input_tokens": 272000, "max_output_tokens": 128000, "max_tokens": 128000, - "mode": "chat", + "mode": "responses", "output_cost_per_token": 1.4e-5, + "supported_endpoints": ["/v1/responses"], "supported_modalities": ["text", "image"], "supported_output_modalities": ["text"], "supports_reasoning": true, @@ -21869,6 +22545,7 @@ "output_cost_per_token": 1.75e-6, "source": "https://openrouter.ai/z-ai/glm-4.6", "supports_function_calling": true, + "supports_prompt_caching": true, "supports_reasoning": true, "supports_tool_choice": true }, @@ -21882,9 +22559,76 @@ "output_cost_per_token": 1.9e-6, "source": "https://openrouter.ai/z-ai/glm-4.6:exacto", "supports_function_calling": true, + "supports_prompt_caching": true, "supports_reasoning": true, "supports_tool_choice": true }, + 
"openrouter/xiaomi/mimo-v2-flash": { + "input_cost_per_token": 9e-8, + "output_cost_per_token": 2.9e-7, + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 0.0, + "litellm_provider": "openrouter", + "max_input_tokens": 262144, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_reasoning": true, + "supports_vision": false, + "supports_prompt_caching": false + }, + "openrouter/z-ai/glm-4.7": { + "input_cost_per_token": 4e-7, + "output_cost_per_token": 1.5e-6, + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 0.0, + "litellm_provider": "openrouter", + "max_input_tokens": 202752, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_reasoning": true, + "supports_vision": true, + "supports_prompt_caching": false, + "supports_assistant_prefill": true + }, + "openrouter/z-ai/glm-4.7-flash": { + "input_cost_per_token": 7e-8, + "output_cost_per_token": 4e-7, + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 0.0, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_reasoning": true, + "supports_vision": true, + "supports_prompt_caching": false + }, + "openrouter/minimax/minimax-m2.1": { + "input_cost_per_token": 2.7e-7, + "output_cost_per_token": 1.2e-6, + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 0.0, + "litellm_provider": "openrouter", + "max_input_tokens": 204000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_reasoning": true, + "supports_vision": true, + "supports_prompt_caching": false, + 
"supports_computer_use": false + }, "ovhcloud/DeepSeek-R1-Distill-Llama-70B": { "input_cost_per_token": 6.7e-7, "litellm_provider": "ovhcloud", @@ -24464,6 +25208,34 @@ "supports_reasoning": true, "supports_tool_choice": true }, + "together_ai/zai-org/GLM-4.7": { + "input_cost_per_token": 4.5e-7, + "litellm_provider": "together_ai", + "max_input_tokens": 200000, + "max_output_tokens": 200000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 2e-6, + "source": "https://www.together.ai/models/glm-4-7", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "together_ai/moonshotai/Kimi-K2.5": { + "input_cost_per_token": 5e-7, + "litellm_provider": "together_ai", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 2.8e-6, + "source": "https://www.together.ai/models/kimi-k2-5", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_reasoning": true + }, "together_ai/moonshotai/Kimi-K2-Instruct-0905": { "input_cost_per_token": 1e-6, "litellm_provider": "together_ai", @@ -25156,7 +25928,9 @@ "max_output_tokens": 16384, "max_tokens": 16384, "mode": "chat", - "output_cost_per_token": 3e-7 + "output_cost_per_token": 3e-7, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/alibaba/qwen3-coder": { "input_cost_per_token": 4e-7, @@ -25165,7 +25939,9 @@ "max_output_tokens": 66536, "max_tokens": 66536, "mode": "chat", - "output_cost_per_token": 1.6e-6 + "output_cost_per_token": 1.6e-6, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/amazon/nova-lite": { "input_cost_per_token": 6e-8, @@ -25174,7 +25950,10 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 2.4e-7 + "output_cost_per_token": 2.4e-7, + "supports_vision": 
true, + "supports_function_calling": true, + "supports_response_schema": true }, "vercel_ai_gateway/amazon/nova-micro": { "input_cost_per_token": 3.5e-8, @@ -25183,7 +25962,9 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 1.4e-7 + "output_cost_per_token": 1.4e-7, + "supports_function_calling": true, + "supports_response_schema": true }, "vercel_ai_gateway/amazon/nova-pro": { "input_cost_per_token": 8e-7, @@ -25192,7 +25973,10 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 3.2e-6 + "output_cost_per_token": 3.2e-6, + "supports_vision": true, + "supports_function_calling": true, + "supports_response_schema": true }, "vercel_ai_gateway/amazon/titan-embed-text-v2": { "input_cost_per_token": 2e-8, @@ -25212,7 +25996,11 @@ "max_output_tokens": 4096, "max_tokens": 4096, "mode": "chat", - "output_cost_per_token": 1.25e-6 + "output_cost_per_token": 1.25e-6, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/anthropic/claude-3-opus": { "cache_creation_input_token_cost": 1.875e-5, @@ -25223,7 +26011,11 @@ "max_output_tokens": 4096, "max_tokens": 4096, "mode": "chat", - "output_cost_per_token": 7.5e-5 + "output_cost_per_token": 7.5e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/anthropic/claude-3.5-haiku": { "cache_creation_input_token_cost": 1e-6, @@ -25234,7 +26026,11 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 4e-6 + "output_cost_per_token": 4e-6, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/anthropic/claude-3.5-sonnet": { "cache_creation_input_token_cost": 3.75e-6, @@ -25245,7 +26041,11 @@ "max_output_tokens": 8192, 
"max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 1.5e-5 + "output_cost_per_token": 1.5e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/anthropic/claude-3.7-sonnet": { "cache_creation_input_token_cost": 3.75e-6, @@ -25256,7 +26056,11 @@ "max_output_tokens": 64000, "max_tokens": 64000, "mode": "chat", - "output_cost_per_token": 1.5e-5 + "output_cost_per_token": 1.5e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/anthropic/claude-4-opus": { "cache_creation_input_token_cost": 1.875e-5, @@ -25267,7 +26071,11 @@ "max_output_tokens": 32000, "max_tokens": 32000, "mode": "chat", - "output_cost_per_token": 7.5e-5 + "output_cost_per_token": 7.5e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/anthropic/claude-4-sonnet": { "cache_creation_input_token_cost": 3.75e-6, @@ -25278,7 +26086,9 @@ "max_output_tokens": 64000, "max_tokens": 64000, "mode": "chat", - "output_cost_per_token": 1.5e-5 + "output_cost_per_token": 1.5e-5, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/cohere/command-a": { "input_cost_per_token": 2.5e-6, @@ -25287,7 +26097,9 @@ "max_output_tokens": 8000, "max_tokens": 8000, "mode": "chat", - "output_cost_per_token": 1e-5 + "output_cost_per_token": 1e-5, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/cohere/command-r": { "input_cost_per_token": 1.5e-7, @@ -25296,7 +26108,9 @@ "max_output_tokens": 4096, "max_tokens": 4096, "mode": "chat", - "output_cost_per_token": 6e-7 + "output_cost_per_token": 6e-7, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/cohere/command-r-plus": { "input_cost_per_token": 
2.5e-6, @@ -25305,7 +26119,9 @@ "max_output_tokens": 4096, "max_tokens": 4096, "mode": "chat", - "output_cost_per_token": 1e-5 + "output_cost_per_token": 1e-5, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/cohere/embed-v4.0": { "input_cost_per_token": 1.2e-7, @@ -25323,7 +26139,8 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 2.19e-6 + "output_cost_per_token": 2.19e-6, + "supports_tool_choice": true }, "vercel_ai_gateway/deepseek/deepseek-r1-distill-llama-70b": { "input_cost_per_token": 7.5e-7, @@ -25332,7 +26149,10 @@ "max_output_tokens": 131072, "max_tokens": 131072, "mode": "chat", - "output_cost_per_token": 9.9e-7 + "output_cost_per_token": 9.9e-7, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/deepseek/deepseek-v3": { "input_cost_per_token": 9e-7, @@ -25341,25 +26161,36 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 9e-7 + "output_cost_per_token": 9e-7, + "supports_tool_choice": true }, "vercel_ai_gateway/google/gemini-2.0-flash": { + "deprecation_date": "2026-03-31", "input_cost_per_token": 1.5e-7, "litellm_provider": "vercel_ai_gateway", "max_input_tokens": 1048576, "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 6e-7 + "output_cost_per_token": 6e-7, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/google/gemini-2.0-flash-lite": { + "deprecation_date": "2026-03-31", "input_cost_per_token": 7.5e-8, "litellm_provider": "vercel_ai_gateway", "max_input_tokens": 1048576, "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 3e-7 + "output_cost_per_token": 3e-7, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + 
"supports_response_schema": true }, "vercel_ai_gateway/google/gemini-2.5-flash": { "input_cost_per_token": 3e-7, @@ -25368,7 +26199,11 @@ "max_output_tokens": 65536, "max_tokens": 65536, "mode": "chat", - "output_cost_per_token": 2.5e-6 + "output_cost_per_token": 2.5e-6, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/google/gemini-2.5-pro": { "input_cost_per_token": 2.5e-6, @@ -25377,7 +26212,11 @@ "max_output_tokens": 65536, "max_tokens": 65536, "mode": "chat", - "output_cost_per_token": 1e-5 + "output_cost_per_token": 1e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/google/gemini-embedding-001": { "input_cost_per_token": 1.5e-7, @@ -25395,7 +26234,10 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 2e-7 + "output_cost_per_token": 2e-7, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/google/text-embedding-005": { "input_cost_per_token": 2.5e-8, @@ -25431,7 +26273,8 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 7.9e-7 + "output_cost_per_token": 7.9e-7, + "supports_tool_choice": true }, "vercel_ai_gateway/meta/llama-3-8b": { "input_cost_per_token": 5e-8, @@ -25440,7 +26283,8 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 8e-8 + "output_cost_per_token": 8e-8, + "supports_tool_choice": true }, "vercel_ai_gateway/meta/llama-3.1-70b": { "input_cost_per_token": 7.2e-7, @@ -25449,7 +26293,8 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 7.2e-7 + "output_cost_per_token": 7.2e-7, + "supports_tool_choice": true }, "vercel_ai_gateway/meta/llama-3.1-8b": { "input_cost_per_token": 5e-8, @@ -25458,7 +26303,9 @@ 
"max_output_tokens": 131072, "max_tokens": 131072, "mode": "chat", - "output_cost_per_token": 8e-8 + "output_cost_per_token": 8e-8, + "supports_function_calling": true, + "supports_response_schema": true }, "vercel_ai_gateway/meta/llama-3.2-11b": { "input_cost_per_token": 1.6e-7, @@ -25467,7 +26314,10 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 1.6e-7 + "output_cost_per_token": 1.6e-7, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/meta/llama-3.2-1b": { "input_cost_per_token": 1e-7, @@ -25485,7 +26335,9 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 1.5e-7 + "output_cost_per_token": 1.5e-7, + "supports_function_calling": true, + "supports_response_schema": true }, "vercel_ai_gateway/meta/llama-3.2-90b": { "input_cost_per_token": 7.2e-7, @@ -25494,7 +26346,10 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 7.2e-7 + "output_cost_per_token": 7.2e-7, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/meta/llama-3.3-70b": { "input_cost_per_token": 7.2e-7, @@ -25503,7 +26358,9 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 7.2e-7 + "output_cost_per_token": 7.2e-7, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/meta/llama-4-maverick": { "input_cost_per_token": 2e-7, @@ -25512,7 +26369,8 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 6e-7 + "output_cost_per_token": 6e-7, + "supports_tool_choice": true }, "vercel_ai_gateway/meta/llama-4-scout": { "input_cost_per_token": 1e-7, @@ -25521,7 +26379,10 @@ "max_output_tokens": 8192, "max_tokens": 8192, "mode": "chat", - "output_cost_per_token": 3e-7 + "output_cost_per_token": 3e-7, + "supports_vision": true, + 
"supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/mistral/codestral": { "input_cost_per_token": 3e-7, @@ -25530,7 +26391,9 @@ "max_output_tokens": 4000, "max_tokens": 4000, "mode": "chat", - "output_cost_per_token": 9e-7 + "output_cost_per_token": 9e-7, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/mistral/codestral-embed": { "input_cost_per_token": 1.5e-7, @@ -25548,7 +26411,10 @@ "max_output_tokens": 128000, "max_tokens": 128000, "mode": "chat", - "output_cost_per_token": 2.8e-7 + "output_cost_per_token": 2.8e-7, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/mistral/magistral-medium": { "input_cost_per_token": 2e-6, @@ -25557,7 +26423,10 @@ "max_output_tokens": 64000, "max_tokens": 64000, "mode": "chat", - "output_cost_per_token": 5e-6 + "output_cost_per_token": 5e-6, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/mistral/magistral-small": { "input_cost_per_token": 5e-7, @@ -25566,7 +26435,8 @@ "max_output_tokens": 64000, "max_tokens": 64000, "mode": "chat", - "output_cost_per_token": 1.5e-6 + "output_cost_per_token": 1.5e-6, + "supports_function_calling": true }, "vercel_ai_gateway/mistral/ministral-3b": { "input_cost_per_token": 4e-8, @@ -25575,7 +26445,9 @@ "max_output_tokens": 4000, "max_tokens": 4000, "mode": "chat", - "output_cost_per_token": 4e-8 + "output_cost_per_token": 4e-8, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/mistral/ministral-8b": { "input_cost_per_token": 1e-7, @@ -25584,7 +26456,10 @@ "max_output_tokens": 4000, "max_tokens": 4000, "mode": "chat", - "output_cost_per_token": 1e-7 + "output_cost_per_token": 1e-7, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/mistral/mistral-embed": { 
"input_cost_per_token": 1e-7, @@ -25602,7 +26477,9 @@ "max_output_tokens": 4000, "max_tokens": 4000, "mode": "chat", - "output_cost_per_token": 6e-6 + "output_cost_per_token": 6e-6, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/mistral/mistral-saba-24b": { "input_cost_per_token": 7.9e-7, @@ -25620,7 +26497,10 @@ "max_output_tokens": 4000, "max_tokens": 4000, "mode": "chat", - "output_cost_per_token": 3e-7 + "output_cost_per_token": 3e-7, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/mistral/mixtral-8x22b-instruct": { "input_cost_per_token": 1.2e-6, @@ -25629,7 +26509,8 @@ "max_output_tokens": 2048, "max_tokens": 2048, "mode": "chat", - "output_cost_per_token": 1.2e-6 + "output_cost_per_token": 1.2e-6, + "supports_function_calling": true }, "vercel_ai_gateway/mistral/pixtral-12b": { "input_cost_per_token": 1.5e-7, @@ -25638,7 +26519,11 @@ "max_output_tokens": 4000, "max_tokens": 4000, "mode": "chat", - "output_cost_per_token": 1.5e-7 + "output_cost_per_token": 1.5e-7, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/mistral/pixtral-large": { "input_cost_per_token": 2e-6, @@ -25647,7 +26532,11 @@ "max_output_tokens": 4000, "max_tokens": 4000, "mode": "chat", - "output_cost_per_token": 6e-6 + "output_cost_per_token": 6e-6, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/moonshotai/kimi-k2": { "input_cost_per_token": 5.5e-7, @@ -25656,7 +26545,9 @@ "max_output_tokens": 16384, "max_tokens": 16384, "mode": "chat", - "output_cost_per_token": 2.2e-6 + "output_cost_per_token": 2.2e-6, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/morph/morph-v3-fast": { "input_cost_per_token": 8e-7, @@ -25683,7 +26574,9 
@@ "max_output_tokens": 4096, "max_tokens": 4096, "mode": "chat", - "output_cost_per_token": 1.5e-6 + "output_cost_per_token": 1.5e-6, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/openai/gpt-3.5-turbo-instruct": { "input_cost_per_token": 1.5e-6, @@ -25701,7 +26594,10 @@ "max_output_tokens": 4096, "max_tokens": 4096, "mode": "chat", - "output_cost_per_token": 3e-5 + "output_cost_per_token": 3e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/openai/gpt-4.1": { "cache_creation_input_token_cost": 0.0, @@ -25712,7 +26608,11 @@ "max_output_tokens": 32768, "max_tokens": 32768, "mode": "chat", - "output_cost_per_token": 8e-6 + "output_cost_per_token": 8e-6, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/openai/gpt-4.1-mini": { "cache_creation_input_token_cost": 0.0, @@ -25723,7 +26623,11 @@ "max_output_tokens": 32768, "max_tokens": 32768, "mode": "chat", - "output_cost_per_token": 1.6e-6 + "output_cost_per_token": 1.6e-6, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/openai/gpt-4.1-nano": { "cache_creation_input_token_cost": 0.0, @@ -25734,7 +26638,11 @@ "max_output_tokens": 32768, "max_tokens": 32768, "mode": "chat", - "output_cost_per_token": 4e-7 + "output_cost_per_token": 4e-7, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/openai/gpt-4o": { "cache_creation_input_token_cost": 0.0, @@ -25745,7 +26653,11 @@ "max_output_tokens": 16384, "max_tokens": 16384, "mode": "chat", - "output_cost_per_token": 1e-5 + "output_cost_per_token": 1e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + 
"supports_response_schema": true }, "vercel_ai_gateway/openai/gpt-4o-mini": { "cache_creation_input_token_cost": 0.0, @@ -25756,7 +26668,11 @@ "max_output_tokens": 16384, "max_tokens": 16384, "mode": "chat", - "output_cost_per_token": 6e-7 + "output_cost_per_token": 6e-7, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/openai/o1": { "cache_creation_input_token_cost": 0.0, @@ -25767,7 +26683,11 @@ "max_output_tokens": 100000, "max_tokens": 100000, "mode": "chat", - "output_cost_per_token": 6e-5 + "output_cost_per_token": 6e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/openai/o3": { "cache_creation_input_token_cost": 0.0, @@ -25778,7 +26698,11 @@ "max_output_tokens": 100000, "max_tokens": 100000, "mode": "chat", - "output_cost_per_token": 8e-6 + "output_cost_per_token": 8e-6, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/openai/o3-mini": { "cache_creation_input_token_cost": 0.0, @@ -25789,7 +26713,10 @@ "max_output_tokens": 100000, "max_tokens": 100000, "mode": "chat", - "output_cost_per_token": 4.4e-6 + "output_cost_per_token": 4.4e-6, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/openai/o4-mini": { "cache_creation_input_token_cost": 0.0, @@ -25800,7 +26727,11 @@ "max_output_tokens": 100000, "max_tokens": 100000, "mode": "chat", - "output_cost_per_token": 4.4e-6 + "output_cost_per_token": 4.4e-6, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_response_schema": true }, "vercel_ai_gateway/openai/text-embedding-3-large": { "input_cost_per_token": 1.3e-7, @@ -25872,7 +26803,10 @@ "max_output_tokens": 32000, 
"max_tokens": 32000, "mode": "chat", - "output_cost_per_token": 1.5e-5 + "output_cost_per_token": 1.5e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/vercel/v0-1.5-md": { "input_cost_per_token": 3e-6, @@ -25881,7 +26815,10 @@ "max_output_tokens": 32768, "max_tokens": 32768, "mode": "chat", - "output_cost_per_token": 1.5e-5 + "output_cost_per_token": 1.5e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/xai/grok-2": { "input_cost_per_token": 2e-6, @@ -25890,7 +26827,9 @@ "max_output_tokens": 4000, "max_tokens": 4000, "mode": "chat", - "output_cost_per_token": 1e-5 + "output_cost_per_token": 1e-5, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/xai/grok-2-vision": { "input_cost_per_token": 2e-6, @@ -25899,7 +26838,10 @@ "max_output_tokens": 32768, "max_tokens": 32768, "mode": "chat", - "output_cost_per_token": 1e-5 + "output_cost_per_token": 1e-5, + "supports_vision": true, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/xai/grok-3": { "input_cost_per_token": 3e-6, @@ -25908,7 +26850,9 @@ "max_output_tokens": 131072, "max_tokens": 131072, "mode": "chat", - "output_cost_per_token": 1.5e-5 + "output_cost_per_token": 1.5e-5, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/xai/grok-3-fast": { "input_cost_per_token": 5e-6, @@ -25917,7 +26861,8 @@ "max_output_tokens": 131072, "max_tokens": 131072, "mode": "chat", - "output_cost_per_token": 2.5e-5 + "output_cost_per_token": 2.5e-5, + "supports_function_calling": true }, "vercel_ai_gateway/xai/grok-3-mini": { "input_cost_per_token": 3e-7, @@ -25926,7 +26871,9 @@ "max_output_tokens": 131072, "max_tokens": 131072, "mode": "chat", - "output_cost_per_token": 5e-7 + "output_cost_per_token": 5e-7, + "supports_function_calling": true, + "supports_tool_choice": true 
}, "vercel_ai_gateway/xai/grok-3-mini-fast": { "input_cost_per_token": 6e-7, @@ -25935,7 +26882,9 @@ "max_output_tokens": 131072, "max_tokens": 131072, "mode": "chat", - "output_cost_per_token": 4e-6 + "output_cost_per_token": 4e-6, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/xai/grok-4": { "input_cost_per_token": 3e-6, @@ -25944,7 +26893,9 @@ "max_output_tokens": 256000, "max_tokens": 256000, "mode": "chat", - "output_cost_per_token": 1.5e-5 + "output_cost_per_token": 1.5e-5, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/zai/glm-4.5": { "input_cost_per_token": 6e-7, @@ -25953,7 +26904,9 @@ "max_output_tokens": 131072, "max_tokens": 131072, "mode": "chat", - "output_cost_per_token": 2.2e-6 + "output_cost_per_token": 2.2e-6, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/zai/glm-4.5-air": { "input_cost_per_token": 2e-7, @@ -25962,7 +26915,9 @@ "max_output_tokens": 96000, "max_tokens": 96000, "mode": "chat", - "output_cost_per_token": 1.1e-6 + "output_cost_per_token": 1.1e-6, + "supports_function_calling": true, + "supports_tool_choice": true }, "vercel_ai_gateway/zai/glm-4.6": { "litellm_provider": "vercel_ai_gateway", @@ -26028,7 +26983,9 @@ "supports_prompt_caching": true, "supports_reasoning": true, "supports_response_schema": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "supports_native_streaming": true, + "supports_vision": true }, "vertex_ai/claude-3-5-sonnet": { "input_cost_per_token": 3e-6, @@ -26299,7 +27256,8 @@ "supports_response_schema": true, "supports_tool_choice": true, "supports_vision": true, - "tool_use_system_prompt_tokens": 159 + "tool_use_system_prompt_tokens": 159, + "supports_native_streaming": true }, "vertex_ai/claude-sonnet-4-5": { "cache_creation_input_token_cost": 3.75e-6, @@ -26351,7 +27309,8 @@ "supports_reasoning": true, "supports_response_schema": true, "supports_tool_choice": 
true, - "supports_vision": true + "supports_vision": true, + "supports_native_streaming": true }, "vertex_ai/claude-opus-4@20250514": { "cache_creation_input_token_cost": 1.875e-5, @@ -26621,6 +27580,21 @@ "output_cost_per_token_batches": 6e-6, "source": "https://docs.cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/3-pro-image" }, + "vertex_ai/deep-research-pro-preview-12-2025": { + "input_cost_per_image": 0.0011, + "input_cost_per_token": 2e-6, + "input_cost_per_token_batches": 1e-6, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 65536, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "image_generation", + "output_cost_per_image": 0.134, + "output_cost_per_image_token": 0.00012, + "output_cost_per_token": 1.2e-5, + "output_cost_per_token_batches": 6e-6, + "source": "https://docs.cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/3-pro-image" + }, "vertex_ai/imagegeneration@006": { "litellm_provider": "vertex_ai-image-models", "mode": "image_generation", @@ -27084,6 +28058,7 @@ "mode": "chat", "output_cost_per_token": 1e-6, "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_regions": ["global"], "supports_function_calling": true, "supports_tool_choice": true }, @@ -27096,6 +28071,7 @@ "mode": "chat", "output_cost_per_token": 4e-6, "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_regions": ["global"], "supports_function_calling": true, "supports_tool_choice": true }, @@ -27108,6 +28084,7 @@ "mode": "chat", "output_cost_per_token": 1.2e-6, "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_regions": ["global"], "supports_function_calling": true, "supports_tool_choice": true }, @@ -27120,6 +28097,7 @@ "mode": "chat", "output_cost_per_token": 1.2e-6, "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_regions": ["global"], "supports_function_calling": true, "supports_tool_choice": 
true }, @@ -27942,6 +28920,7 @@ "supports_web_search": true }, "xai/grok-3": { + "cache_read_input_token_cost": 7.5e-7, "input_cost_per_token": 3e-6, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -27956,6 +28935,7 @@ "supports_web_search": true }, "xai/grok-3-beta": { + "cache_read_input_token_cost": 7.5e-7, "input_cost_per_token": 3e-6, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -27970,6 +28950,7 @@ "supports_web_search": true }, "xai/grok-3-fast-beta": { + "cache_read_input_token_cost": 1.25e-6, "input_cost_per_token": 5e-6, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -27984,6 +28965,7 @@ "supports_web_search": true }, "xai/grok-3-fast-latest": { + "cache_read_input_token_cost": 1.25e-6, "input_cost_per_token": 5e-6, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -27998,6 +28980,7 @@ "supports_web_search": true }, "xai/grok-3-latest": { + "cache_read_input_token_cost": 7.5e-7, "input_cost_per_token": 3e-6, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -28012,6 +28995,7 @@ "supports_web_search": true }, "xai/grok-3-mini": { + "cache_read_input_token_cost": 7.5e-8, "input_cost_per_token": 3e-7, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -28027,6 +29011,7 @@ "supports_web_search": true }, "xai/grok-3-mini-beta": { + "cache_read_input_token_cost": 7.5e-8, "input_cost_per_token": 3e-7, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -28042,6 +29027,7 @@ "supports_web_search": true }, "xai/grok-3-mini-fast": { + "cache_read_input_token_cost": 1.5e-7, "input_cost_per_token": 6e-7, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -28057,6 +29043,7 @@ "supports_web_search": true }, "xai/grok-3-mini-fast-beta": { + "cache_read_input_token_cost": 1.5e-7, "input_cost_per_token": 6e-7, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -28072,6 +29059,7 @@ "supports_web_search": true }, "xai/grok-3-mini-fast-latest": { + "cache_read_input_token_cost": 1.5e-7, 
"input_cost_per_token": 6e-7, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -28087,6 +29075,7 @@ "supports_web_search": true }, "xai/grok-3-mini-latest": { + "cache_read_input_token_cost": 7.5e-8, "input_cost_per_token": 3e-7, "litellm_provider": "xai", "max_input_tokens": 131072, @@ -28343,6 +29332,36 @@ "supports_vision": true, "supports_web_search": true }, + "zai/glm-5": { + "cache_creation_input_token_cost": 0, + "cache_read_input_token_cost": 2e-7, + "input_cost_per_token": 1e-6, + "output_cost_per_token": 3.2e-6, + "litellm_provider": "zai", + "max_input_tokens": 200000, + "max_output_tokens": 128000, + "mode": "chat", + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, + "zai/glm-5-code": { + "cache_creation_input_token_cost": 0, + "cache_read_input_token_cost": 3e-7, + "input_cost_per_token": 1.2e-6, + "output_cost_per_token": 5e-6, + "litellm_provider": "zai", + "max_input_tokens": 200000, + "max_output_tokens": 128000, + "mode": "chat", + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, "zai/glm-4.7": { "cache_creation_input_token_cost": 0, "cache_read_input_token_cost": 1.1e-7, @@ -28353,11 +29372,14 @@ "max_output_tokens": 128000, "mode": "chat", "supports_function_calling": true, + "supports_prompt_caching": true, "supports_reasoning": true, "supports_tool_choice": true, "source": "https://docs.z.ai/guides/overview/pricing" }, "zai/glm-4.6": { + "cache_creation_input_token_cost": 0, + "cache_read_input_token_cost": 1.1e-7, "input_cost_per_token": 6e-7, "output_cost_per_token": 2.2e-6, "litellm_provider": "zai", @@ -28365,6 +29387,8 @@ "max_output_tokens": 128000, "mode": "chat", "supports_function_calling": true, + "supports_prompt_caching": true, + 
"supports_reasoning": true, "supports_tool_choice": true, "source": "https://docs.z.ai/guides/overview/pricing" }, @@ -31921,5 +32945,18 @@ "output_cost_per_token": 0, "litellm_provider": "llamagate", "mode": "embedding" + }, + "sarvam/sarvam-m": { + "cache_creation_input_token_cost": 0, + "cache_creation_input_token_cost_above_1hr": 0, + "cache_read_input_token_cost": 0, + "input_cost_per_token": 0, + "litellm_provider": "sarvam", + "max_input_tokens": 8192, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 0, + "supports_reasoning": true } } diff --git a/letta/orm/__init__.py b/letta/orm/__init__.py index 72e1112a..04fa066a 100644 --- a/letta/orm/__init__.py +++ b/letta/orm/__init__.py @@ -1,44 +1,48 @@ -from letta.orm.agent import Agent -from letta.orm.agents_tags import AgentsTags -from letta.orm.archive import Archive -from letta.orm.archives_agents import ArchivesAgents -from letta.orm.base import Base -from letta.orm.block import Block -from letta.orm.block_history import BlockHistory -from letta.orm.blocks_agents import BlocksAgents -from letta.orm.blocks_conversations import BlocksConversations -from letta.orm.blocks_tags import BlocksTags -from letta.orm.conversation import Conversation -from letta.orm.conversation_messages import ConversationMessage -from letta.orm.file import FileMetadata -from letta.orm.files_agents import FileAgent -from letta.orm.group import Group -from letta.orm.groups_agents import GroupsAgents -from letta.orm.groups_blocks import GroupsBlocks -from letta.orm.identities_agents import IdentitiesAgents -from letta.orm.identities_blocks import IdentitiesBlocks -from letta.orm.identity import Identity -from letta.orm.job import Job -from letta.orm.llm_batch_items import LLMBatchItem -from letta.orm.llm_batch_job import LLMBatchJob -from letta.orm.mcp_oauth import MCPOAuth -from letta.orm.mcp_server import MCPServer -from letta.orm.message import Message -from letta.orm.organization 
import Organization -from letta.orm.passage import ArchivalPassage, BasePassage, SourcePassage -from letta.orm.passage_tag import PassageTag -from letta.orm.prompt import Prompt -from letta.orm.provider import Provider -from letta.orm.provider_model import ProviderModel -from letta.orm.provider_trace import ProviderTrace -from letta.orm.provider_trace_metadata import ProviderTraceMetadata -from letta.orm.run import Run -from letta.orm.run_metrics import RunMetrics -from letta.orm.sandbox_config import AgentEnvironmentVariable, SandboxConfig, SandboxEnvironmentVariable -from letta.orm.source import Source -from letta.orm.sources_agents import SourcesAgents -from letta.orm.step import Step -from letta.orm.step_metrics import StepMetrics -from letta.orm.tool import Tool -from letta.orm.tools_agents import ToolsAgents -from letta.orm.user import User +from letta.orm.agent import Agent as Agent +from letta.orm.agents_tags import AgentsTags as AgentsTags +from letta.orm.archive import Archive as Archive +from letta.orm.archives_agents import ArchivesAgents as ArchivesAgents +from letta.orm.base import Base as Base +from letta.orm.block import Block as Block +from letta.orm.block_history import BlockHistory as BlockHistory +from letta.orm.blocks_agents import BlocksAgents as BlocksAgents +from letta.orm.blocks_conversations import BlocksConversations as BlocksConversations +from letta.orm.blocks_tags import BlocksTags as BlocksTags +from letta.orm.conversation import Conversation as Conversation +from letta.orm.conversation_messages import ConversationMessage as ConversationMessage +from letta.orm.file import FileMetadata as FileMetadata +from letta.orm.files_agents import FileAgent as FileAgent +from letta.orm.group import Group as Group +from letta.orm.groups_agents import GroupsAgents as GroupsAgents +from letta.orm.groups_blocks import GroupsBlocks as GroupsBlocks +from letta.orm.identities_agents import IdentitiesAgents as IdentitiesAgents +from 
letta.orm.identities_blocks import IdentitiesBlocks as IdentitiesBlocks +from letta.orm.identity import Identity as Identity +from letta.orm.job import Job as Job +from letta.orm.llm_batch_items import LLMBatchItem as LLMBatchItem +from letta.orm.llm_batch_job import LLMBatchJob as LLMBatchJob +from letta.orm.mcp_oauth import MCPOAuth as MCPOAuth +from letta.orm.mcp_server import MCPServer as MCPServer +from letta.orm.message import Message as Message +from letta.orm.organization import Organization as Organization +from letta.orm.passage import ArchivalPassage as ArchivalPassage, BasePassage as BasePassage, SourcePassage as SourcePassage +from letta.orm.passage_tag import PassageTag as PassageTag +from letta.orm.prompt import Prompt as Prompt +from letta.orm.provider import Provider as Provider +from letta.orm.provider_model import ProviderModel as ProviderModel +from letta.orm.provider_trace import ProviderTrace as ProviderTrace +from letta.orm.provider_trace_metadata import ProviderTraceMetadata as ProviderTraceMetadata +from letta.orm.run import Run as Run +from letta.orm.run_metrics import RunMetrics as RunMetrics +from letta.orm.sandbox_config import ( + AgentEnvironmentVariable as AgentEnvironmentVariable, + SandboxConfig as SandboxConfig, + SandboxEnvironmentVariable as SandboxEnvironmentVariable, +) +from letta.orm.source import Source as Source +from letta.orm.sources_agents import SourcesAgents as SourcesAgents +from letta.orm.step import Step as Step +from letta.orm.step_metrics import StepMetrics as StepMetrics +from letta.orm.tool import Tool as Tool +from letta.orm.tools_agents import ToolsAgents as ToolsAgents +from letta.orm.user import User as User diff --git a/letta/orm/agent.py b/letta/orm/agent.py index 1670e7f1..b6d32720 100644 --- a/letta/orm/agent.py +++ b/letta/orm/agent.py @@ -32,12 +32,14 @@ if TYPE_CHECKING: from letta.orm.archives_agents import ArchivesAgents from letta.orm.conversation import Conversation from letta.orm.files_agents 
import FileAgent + from letta.orm.group import Group from letta.orm.identity import Identity + from letta.orm.llm_batch_items import LLMBatchItem from letta.orm.organization import Organization from letta.orm.run import Run + from letta.orm.sandbox_config import AgentEnvironmentVariable from letta.orm.source import Source from letta.orm.tool import Tool - from letta.services.summarizer.summarizer_config import CompactionSettings class Agent(SqlalchemyBase, OrganizationMixin, ProjectMixin, TemplateEntityMixin, TemplateMixin, AsyncAttrs): @@ -286,6 +288,7 @@ class Agent(SqlalchemyBase, OrganizationMixin, ProjectMixin, TemplateEntityMixin is not None ], agent_type=self.agent_type, + git_enabled=any(t.tag == "git-memory-enabled" for t in self.tags), ), "blocks": lambda: [b.to_pydantic() for b in self.core_memory], "identity_ids": lambda: [i.id for i in self.identities], @@ -418,7 +421,15 @@ class Agent(SqlalchemyBase, OrganizationMixin, ProjectMixin, TemplateEntityMixin return None # Only load requested relationships - tags = self.awaitable_attrs.tags if "tags" in include_relationships or "agent.tags" in include_set else empty_list_async() + # Always load tags when memory is requested, since git_enabled depends on them + tags = ( + self.awaitable_attrs.tags + if "tags" in include_relationships + or "memory" in include_relationships + or "agent.tags" in include_set + or "agent.blocks" in include_set + else empty_list_async() + ) tools = self.awaitable_attrs.tools if "tools" in include_relationships or "agent.tools" in include_set else empty_list_async() sources = ( self.awaitable_attrs.sources if "sources" in include_relationships or "agent.sources" in include_set else empty_list_async() @@ -473,6 +484,7 @@ class Agent(SqlalchemyBase, OrganizationMixin, ProjectMixin, TemplateEntityMixin if (block := b.to_pydantic_block(per_file_view_window_char_limit=self._get_per_file_view_window_char_limit())) is not None ], agent_type=self.agent_type, + 
git_enabled="git-memory-enabled" in state["tags"], ) state["blocks"] = [m.to_pydantic() for m in memory] state["identity_ids"] = [i.id for i in identities] diff --git a/letta/orm/agents_tags.py b/letta/orm/agents_tags.py index d7177083..a61a59b2 100644 --- a/letta/orm/agents_tags.py +++ b/letta/orm/agents_tags.py @@ -1,3 +1,8 @@ +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from letta.orm.agent import Agent + from sqlalchemy import ForeignKey, Index, String, UniqueConstraint from sqlalchemy.orm import Mapped, mapped_column, relationship diff --git a/letta/orm/archives_agents.py b/letta/orm/archives_agents.py index 06c63a5e..85472408 100644 --- a/letta/orm/archives_agents.py +++ b/letta/orm/archives_agents.py @@ -1,4 +1,9 @@ from datetime import datetime +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from letta.orm.agent import Agent + from letta.orm.archive import Archive from sqlalchemy import Boolean, DateTime, ForeignKey, String, UniqueConstraint from sqlalchemy.orm import Mapped, mapped_column, relationship diff --git a/letta/orm/base.py b/letta/orm/base.py index 8145dfcb..c9a056d0 100644 --- a/letta/orm/base.py +++ b/letta/orm/base.py @@ -78,7 +78,7 @@ class CommonSqlalchemyMetaMixins(Base): setattr(self, full_prop, None) return # Safety check - prefix, id_ = value.split("-", 1) + prefix, _id = value.split("-", 1) assert prefix == "user", f"{prefix} is not a valid id prefix for a user id" # Set the full value diff --git a/letta/orm/block.py b/letta/orm/block.py index 08f5fb28..7a73ee47 100644 --- a/letta/orm/block.py +++ b/letta/orm/block.py @@ -1,18 +1,19 @@ -from typing import TYPE_CHECKING, List, Optional, Type +from typing import TYPE_CHECKING, ClassVar, List, Optional, Type from sqlalchemy import JSON, BigInteger, ForeignKey, Index, Integer, String, UniqueConstraint, event from sqlalchemy.orm import Mapped, declared_attr, mapped_column, relationship from letta.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT from letta.orm.block_history 
import BlockHistory -from letta.orm.blocks_agents import BlocksAgents from letta.orm.mixins import OrganizationMixin, ProjectMixin, TemplateEntityMixin, TemplateMixin from letta.orm.sqlalchemy_base import SqlalchemyBase from letta.schemas.block import Block as PydanticBlock, Human, Persona if TYPE_CHECKING: from letta.orm import Organization + from letta.orm.agent import Agent from letta.orm.blocks_tags import BlocksTags + from letta.orm.group import Group from letta.orm.identity import Identity @@ -57,7 +58,7 @@ class Block(OrganizationMixin, SqlalchemyBase, ProjectMixin, TemplateEntityMixin ) # NOTE: This takes advantage of built-in optimistic locking functionality by SqlAlchemy # https://docs.sqlalchemy.org/en/20/orm/versioning.html - __mapper_args__ = {"version_id_col": version} + __mapper_args__: ClassVar[dict] = {"version_id_col": version} # relationships organization: Mapped[Optional["Organization"]] = relationship("Organization", lazy="raise") @@ -92,9 +93,9 @@ class Block(OrganizationMixin, SqlalchemyBase, ProjectMixin, TemplateEntityMixin def to_pydantic(self) -> Type: match self.label: - case "human": + case "human" | "system/human": Schema = Human - case "persona": + case "persona" | "system/persona": Schema = Persona case _: Schema = PydanticBlock diff --git a/letta/orm/blocks_tags.py b/letta/orm/blocks_tags.py index 23412df8..0f7969dc 100644 --- a/letta/orm/blocks_tags.py +++ b/letta/orm/blocks_tags.py @@ -1,5 +1,8 @@ from datetime import datetime -from typing import Optional +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from letta.orm.block import Block from sqlalchemy import Boolean, DateTime, ForeignKey, Index, String, UniqueConstraint, func, text from sqlalchemy.orm import Mapped, mapped_column, relationship diff --git a/letta/orm/conversation.py b/letta/orm/conversation.py index a3fe7a9f..d7d9a254 100644 --- a/letta/orm/conversation.py +++ b/letta/orm/conversation.py @@ -1,18 +1,22 @@ import uuid from typing import 
TYPE_CHECKING, List, Optional -from sqlalchemy import ForeignKey, Index, String +from pydantic import TypeAdapter +from sqlalchemy import JSON, ForeignKey, Index, String from sqlalchemy.orm import Mapped, mapped_column, relationship from letta.orm.mixins import OrganizationMixin from letta.orm.sqlalchemy_base import SqlalchemyBase from letta.schemas.conversation import Conversation as PydanticConversation +from letta.schemas.model import ModelSettingsUnion if TYPE_CHECKING: from letta.orm.agent import Agent from letta.orm.block import Block from letta.orm.conversation_messages import ConversationMessage +_model_settings_adapter = TypeAdapter(ModelSettingsUnion) + class Conversation(SqlalchemyBase, OrganizationMixin): """Conversations that can be created on an agent for concurrent messaging.""" @@ -27,6 +31,12 @@ class Conversation(SqlalchemyBase, OrganizationMixin): id: Mapped[str] = mapped_column(String, primary_key=True, default=lambda: f"conv-{uuid.uuid4()}") agent_id: Mapped[str] = mapped_column(String, ForeignKey("agents.id", ondelete="CASCADE"), nullable=False) summary: Mapped[Optional[str]] = mapped_column(String, nullable=True, doc="Summary of the conversation") + model: Mapped[Optional[str]] = mapped_column( + String, nullable=True, doc="Model handle override for this conversation (format: provider/model-name)" + ) + model_settings: Mapped[Optional[dict]] = mapped_column( + JSON, nullable=True, doc="Model settings override for this conversation (provider-specific settings)" + ) # Relationships agent: Mapped["Agent"] = relationship("Agent", back_populates="conversations", lazy="raise") @@ -55,4 +65,6 @@ class Conversation(SqlalchemyBase, OrganizationMixin): created_by_id=self.created_by_id, last_updated_by_id=self.last_updated_by_id, isolated_block_ids=[b.id for b in self.isolated_blocks] if self.isolated_blocks else [], + model=self.model, + model_settings=_model_settings_adapter.validate_python(self.model_settings) if self.model_settings else None, ) diff 
--git a/letta/orm/errors.py b/letta/orm/errors.py index a574e74c..f9e3069d 100644 --- a/letta/orm/errors.py +++ b/letta/orm/errors.py @@ -14,9 +14,25 @@ class ForeignKeyConstraintViolationError(ValueError): """Custom exception for foreign key constraint violations.""" +class DatabaseLockNotAvailableError(Exception): + """Raised when a database lock cannot be acquired (PostgreSQL 55P03).""" + + def __init__(self, message="Could not acquire database lock", original_exception=None): + super().__init__(message) + self.original_exception = original_exception + + class DatabaseTimeoutError(Exception): """Custom exception for database timeout issues.""" def __init__(self, message="Database operation timed out", original_exception=None): super().__init__(message) self.original_exception = original_exception + + +class DatabaseDeadlockError(Exception): + """Custom exception for database deadlock errors (PostgreSQL error code 40P01).""" + + def __init__(self, message="A database deadlock was detected", original_exception=None): + super().__init__(message) + self.original_exception = original_exception diff --git a/letta/orm/files_agents.py b/letta/orm/files_agents.py index 1c768711..f9486cbe 100644 --- a/letta/orm/files_agents.py +++ b/letta/orm/files_agents.py @@ -12,7 +12,7 @@ from letta.schemas.file import FileAgent as PydanticFileAgent from letta.utils import truncate_file_visible_content if TYPE_CHECKING: - pass + from letta.orm.agent import Agent class FileAgent(SqlalchemyBase, OrganizationMixin): diff --git a/letta/orm/group.py b/letta/orm/group.py index 5b2c7e57..7fe3298a 100644 --- a/letta/orm/group.py +++ b/letta/orm/group.py @@ -1,5 +1,10 @@ import uuid -from typing import List, Optional +from typing import TYPE_CHECKING, List, Optional + +if TYPE_CHECKING: + from letta.orm.agent import Agent + from letta.orm.block import Block + from letta.orm.organization import Organization from sqlalchemy import JSON, ForeignKey, String from sqlalchemy.orm import Mapped, 
mapped_column, relationship diff --git a/letta/orm/identity.py b/letta/orm/identity.py index dd7ae51c..5badaf81 100644 --- a/letta/orm/identity.py +++ b/letta/orm/identity.py @@ -1,5 +1,10 @@ import uuid -from typing import List +from typing import TYPE_CHECKING, List + +if TYPE_CHECKING: + from letta.orm.agent import Agent + from letta.orm.block import Block + from letta.orm.organization import Organization from sqlalchemy import String, UniqueConstraint from sqlalchemy.dialects.postgresql import JSON diff --git a/letta/orm/llm_batch_items.py b/letta/orm/llm_batch_items.py index b4f08cb0..e027af25 100644 --- a/letta/orm/llm_batch_items.py +++ b/letta/orm/llm_batch_items.py @@ -1,5 +1,10 @@ import uuid -from typing import Optional, Union +from typing import TYPE_CHECKING, Optional, Union + +if TYPE_CHECKING: + from letta.orm.agent import Agent + from letta.orm.llm_batch_job import LLMBatchJob + from letta.orm.organization import Organization from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse from sqlalchemy import ForeignKey, Index, String diff --git a/letta/orm/llm_batch_job.py b/letta/orm/llm_batch_job.py index db085dc7..a3b09e7b 100644 --- a/letta/orm/llm_batch_job.py +++ b/letta/orm/llm_batch_job.py @@ -1,6 +1,10 @@ import uuid from datetime import datetime -from typing import List, Optional, Union +from typing import TYPE_CHECKING, List, Optional, Union + +if TYPE_CHECKING: + from letta.orm.llm_batch_items import LLMBatchItem + from letta.orm.organization import Organization from anthropic.types.beta.messages import BetaMessageBatch from sqlalchemy import DateTime, ForeignKey, Index, String diff --git a/letta/orm/mcp_server.py b/letta/orm/mcp_server.py index a62ff1d6..955a9059 100644 --- a/letta/orm/mcp_server.py +++ b/letta/orm/mcp_server.py @@ -1,4 +1,3 @@ -import json from typing import TYPE_CHECKING, Optional from sqlalchemy import JSON, String, Text, UniqueConstraint diff --git a/letta/orm/message.py b/letta/orm/message.py index 
578c7120..10b6b562 100644 --- a/letta/orm/message.py +++ b/letta/orm/message.py @@ -1,4 +1,10 @@ -from typing import List, Optional +from typing import TYPE_CHECKING, List, Optional + +if TYPE_CHECKING: + from letta.orm.job import Job + from letta.orm.organization import Organization + from letta.orm.run import Run + from letta.orm.step import Step from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall from sqlalchemy import BigInteger, FetchedValue, ForeignKey, Index, event, text diff --git a/letta/orm/passage.py b/letta/orm/passage.py index 4bbe9509..fb59aff5 100644 --- a/letta/orm/passage.py +++ b/letta/orm/passage.py @@ -15,6 +15,7 @@ config = LettaConfig() if TYPE_CHECKING: from letta.orm.organization import Organization + from letta.orm.passage_tag import PassageTag class BasePassage(SqlalchemyBase, OrganizationMixin): diff --git a/letta/orm/provider_trace.py b/letta/orm/provider_trace.py index 90399b5d..2a4e4bd2 100644 --- a/letta/orm/provider_trace.py +++ b/letta/orm/provider_trace.py @@ -1,5 +1,8 @@ import uuid -from typing import Optional +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from letta.orm.organization import Organization from sqlalchemy import JSON, Index, String from sqlalchemy.orm import Mapped, mapped_column, relationship diff --git a/letta/orm/provider_trace_metadata.py b/letta/orm/provider_trace_metadata.py index 5d8fecf7..1d632a1e 100644 --- a/letta/orm/provider_trace_metadata.py +++ b/letta/orm/provider_trace_metadata.py @@ -1,6 +1,9 @@ import uuid from datetime import datetime -from typing import Optional +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from letta.orm.organization import Organization from sqlalchemy import JSON, DateTime, Index, String, UniqueConstraint, func from sqlalchemy.orm import Mapped, mapped_column, relationship diff --git a/letta/orm/run.py b/letta/orm/run.py index b2444e54..947a68bf 100644 --- a/letta/orm/run.py +++ 
b/letta/orm/run.py @@ -2,7 +2,7 @@ import uuid from datetime import datetime from typing import TYPE_CHECKING, List, Optional -from sqlalchemy import JSON, BigInteger, Boolean, DateTime, ForeignKey, Index, String +from sqlalchemy import JSON, BigInteger, Boolean, ForeignKey, Index, String from sqlalchemy.orm import Mapped, mapped_column, relationship from letta.orm.mixins import OrganizationMixin, ProjectMixin, TemplateMixin diff --git a/letta/orm/run_metrics.py b/letta/orm/run_metrics.py index 22c5d8e7..8cc4d79a 100644 --- a/letta/orm/run_metrics.py +++ b/letta/orm/run_metrics.py @@ -1,7 +1,7 @@ from datetime import datetime, timezone from typing import TYPE_CHECKING, List, Optional -from sqlalchemy import JSON, BigInteger, ForeignKey, Integer, String +from sqlalchemy import JSON, BigInteger, ForeignKey, Integer from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.orm import Mapped, Session, mapped_column, relationship @@ -14,7 +14,6 @@ from letta.settings import DatabaseChoice, settings if TYPE_CHECKING: from letta.orm.agent import Agent from letta.orm.run import Run - from letta.orm.step import Step class RunMetrics(SqlalchemyBase, ProjectMixin, AgentMixin, OrganizationMixin, TemplateMixin): diff --git a/letta/orm/sqlalchemy_base.py b/letta/orm/sqlalchemy_base.py index c012c54a..314a565f 100644 --- a/letta/orm/sqlalchemy_base.py +++ b/letta/orm/sqlalchemy_base.py @@ -1,3 +1,4 @@ +import asyncio import inspect from datetime import datetime from enum import Enum @@ -5,8 +6,9 @@ from functools import wraps from pprint import pformat from typing import TYPE_CHECKING, List, Literal, Optional, Tuple, Union -from asyncpg.exceptions import QueryCanceledError +from asyncpg.exceptions import DeadlockDetectedError, LockNotAvailableError as AsyncpgLockNotAvailableError, QueryCanceledError from sqlalchemy import Sequence, String, and_, delete, func, or_, select +from sqlalchemy.dialects.postgresql import insert as pg_insert from sqlalchemy.exc import DBAPIError, 
IntegrityError, TimeoutError from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.orm import Mapped, Session, mapped_column @@ -16,15 +18,41 @@ from sqlalchemy.orm.interfaces import ORMOption from letta.errors import ConcurrentUpdateError from letta.log import get_logger from letta.orm.base import Base, CommonSqlalchemyMetaMixins -from letta.orm.errors import DatabaseTimeoutError, ForeignKeyConstraintViolationError, NoResultFound, UniqueConstraintViolationError +from letta.orm.errors import ( + DatabaseDeadlockError, + DatabaseLockNotAvailableError, + DatabaseTimeoutError, + ForeignKeyConstraintViolationError, + NoResultFound, + UniqueConstraintViolationError, +) from letta.settings import DatabaseChoice if TYPE_CHECKING: from pydantic import BaseModel + from sqlalchemy import Select + + from letta.schemas.user import User logger = get_logger(__name__) +_DEADLOCK_MAX_RETRIES = 3 +_DEADLOCK_BASE_DELAY = 0.1 + + +def _is_deadlock_error(exc: Exception) -> bool: + """Check if an exception is a database deadlock error (PostgreSQL error code 40P01).""" + orig = getattr(exc, "orig", exc) + if isinstance(orig, DeadlockDetectedError): + return True + if hasattr(orig, "pgcode") and getattr(orig, "pgcode", None) == "40P01": + return True + if hasattr(orig, "args") and orig.args and isinstance(orig.args[0], dict): + if orig.args[0].get("C") == "40P01": + return True + return False + def handle_db_timeout(func): """Decorator to handle database timeout errors and wrap them in a custom exception. @@ -521,24 +549,52 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base): actor: Optional["User"] = None, no_commit: bool = False, no_refresh: bool = False, - ) -> "SqlalchemyBase": - """Async version of create function""" + ignore_conflicts: bool = False, + ) -> Optional["SqlalchemyBase"]: + """Async version of create function + + Args: + ignore_conflicts: If True, uses INSERT ... ON CONFLICT DO NOTHING and returns + None if a conflict occurred (no exception raised). 
+ """ logger.debug(f"Creating {self.__class__.__name__} with ID: {self.id} with actor={actor}") if actor: self._set_created_and_updated_by_fields(actor.id) - try: - db_session.add(self) - if no_commit: - await db_session.flush() # no commit, just flush to get PK - else: - await db_session.commit() - if not no_refresh: - await db_session.refresh(self) - return self - except (DBAPIError, IntegrityError) as e: - self._handle_dbapi_error(e) + if ignore_conflicts: + values = { + col.name: getattr(self, col.key) + for col in self.__table__.columns + if not (getattr(self, col.key) is None and col.server_default is not None) + } + stmt = pg_insert(self.__table__).values(**values).on_conflict_do_nothing() + result = await db_session.execute(stmt) + if not no_commit: + await db_session.commit() + return self if result.rowcount > 0 else None + + for attempt in range(_DEADLOCK_MAX_RETRIES): + try: + db_session.add(self) + if no_commit: + await db_session.flush() + else: + await db_session.commit() + + if not no_refresh: + await db_session.refresh(self) + return self + except (DBAPIError, IntegrityError) as e: + if _is_deadlock_error(e) and attempt < _DEADLOCK_MAX_RETRIES - 1: + logger.warning( + f"Deadlock detected in {self.__class__.__name__}.create_async " + f"(attempt {attempt + 1}/{_DEADLOCK_MAX_RETRIES}), retrying..." 
+ ) + await db_session.rollback() + await asyncio.sleep(_DEADLOCK_BASE_DELAY * (2**attempt)) + continue + self._handle_dbapi_error(e) @classmethod @handle_db_timeout @@ -567,31 +623,38 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base): if not items: return [] - # Set created/updated by fields if actor is provided if actor: for item in items: item._set_created_and_updated_by_fields(actor.id) - try: - db_session.add_all(items) - if no_commit: - await db_session.flush() - else: - await db_session.commit() + for attempt in range(_DEADLOCK_MAX_RETRIES): + try: + db_session.add_all(items) + if no_commit: + await db_session.flush() + else: + await db_session.commit() - if no_refresh: - return items - else: - # Re-query the objects to get them with relationships loaded - item_ids = [item.id for item in items] - query = select(cls).where(cls.id.in_(item_ids)) - if hasattr(cls, "created_at"): - query = query.order_by(cls.created_at) + if no_refresh: + return items + else: + item_ids = [item.id for item in items] + query = select(cls).where(cls.id.in_(item_ids)) + if hasattr(cls, "created_at"): + query = query.order_by(cls.created_at) - result = await db_session.execute(query) - return list(result.scalars()) - except (DBAPIError, IntegrityError) as e: - cls._handle_dbapi_error(e) + result = await db_session.execute(query) + return list(result.scalars()) + except (DBAPIError, IntegrityError) as e: + if _is_deadlock_error(e) and attempt < _DEADLOCK_MAX_RETRIES - 1: + logger.warning( + f"Deadlock detected in {cls.__name__}.batch_create_async " + f"(attempt {attempt + 1}/{_DEADLOCK_MAX_RETRIES}), retrying..." 
+ ) + await db_session.rollback() + await asyncio.sleep(_DEADLOCK_BASE_DELAY * (2**attempt)) + continue + cls._handle_dbapi_error(e) @handle_db_timeout async def delete_async(self, db_session: "AsyncSession", actor: Optional["User"] = None) -> "SqlalchemyBase": @@ -607,18 +670,26 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base): @handle_db_timeout async def hard_delete_async(self, db_session: "AsyncSession", actor: Optional["User"] = None) -> None: """Permanently removes the record from the database asynchronously.""" - # Capture ID before deletion attempt to avoid lazy loading in exception handler obj_id = self.id obj_class = self.__class__.__name__ logger.debug(f"Hard deleting {obj_class} with ID: {obj_id} with actor={actor} (async)") - try: - await db_session.delete(self) - await db_session.commit() - except Exception as e: - await db_session.rollback() - logger.exception(f"Failed to hard delete {obj_class} with ID {obj_id}") - raise ValueError(f"Failed to hard delete {obj_class} with ID {obj_id}: {e}") + for attempt in range(_DEADLOCK_MAX_RETRIES): + try: + await db_session.delete(self) + await db_session.commit() + return + except Exception as e: + if _is_deadlock_error(e) and attempt < _DEADLOCK_MAX_RETRIES - 1: + logger.warning( + f"Deadlock detected in {obj_class}.hard_delete_async (attempt {attempt + 1}/{_DEADLOCK_MAX_RETRIES}), retrying..." 
+ ) + await db_session.rollback() + await asyncio.sleep(_DEADLOCK_BASE_DELAY * (2**attempt)) + continue + await db_session.rollback() + logger.exception(f"Failed to hard delete {obj_class} with ID {obj_id}") + raise ValueError(f"Failed to hard delete {obj_class} with ID {obj_id}: {e}") @classmethod @handle_db_timeout @@ -637,21 +708,35 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base): logger.debug(f"No identifiers provided for {cls.__name__}, nothing to delete") return - query = delete(cls) - query = query.where(cls.id.in_(identifiers)) - query = cls.apply_access_predicate(query, actor, access, access_type) - try: - result = await db_session.execute(query) - await db_session.commit() - logger.debug(f"Successfully deleted {result.rowcount} {cls.__name__} records") - except Exception as e: - await db_session.rollback() - logger.exception(f"Failed to hard delete {cls.__name__} with identifiers {identifiers}") - raise ValueError(f"Failed to hard delete {cls.__name__} with identifiers {identifiers}: {e}") + for attempt in range(_DEADLOCK_MAX_RETRIES): + query = delete(cls) + query = query.where(cls.id.in_(identifiers)) + query = cls.apply_access_predicate(query, actor, access, access_type) + try: + result = await db_session.execute(query) + await db_session.commit() + logger.debug(f"Successfully deleted {result.rowcount} {cls.__name__} records") + return + except Exception as e: + if _is_deadlock_error(e) and attempt < _DEADLOCK_MAX_RETRIES - 1: + logger.warning( + f"Deadlock detected in {cls.__name__}.bulk_hard_delete_async " + f"(attempt {attempt + 1}/{_DEADLOCK_MAX_RETRIES}), retrying..." 
+ ) + await db_session.rollback() + await asyncio.sleep(_DEADLOCK_BASE_DELAY * (2**attempt)) + continue + await db_session.rollback() + logger.exception(f"Failed to hard delete {cls.__name__} with identifiers {identifiers}") + raise ValueError(f"Failed to hard delete {cls.__name__} with identifiers {identifiers}: {e}") @handle_db_timeout async def update_async( - self, db_session: "AsyncSession", actor: Optional["User"] = None, no_commit: bool = False, no_refresh: bool = False + self, + db_session: "AsyncSession", + actor: Optional["User"] = None, + no_commit: bool = False, + no_refresh: bool = False, ) -> "SqlalchemyBase": """Async version of update function""" logger.debug(f"Updating {self.__class__.__name__} with ID: {self.id} with actor={actor}") @@ -660,30 +745,36 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base): self._set_created_and_updated_by_fields(actor.id) self.set_updated_at() - # Capture id before try block to avoid accessing expired attributes after rollback object_id = self.id class_name = self.__class__.__name__ - try: - db_session.add(self) - if no_commit: - await db_session.flush() - else: - await db_session.commit() + # Snapshot column values before commit so they survive rollback's expire-on-rollback behavior + _col_snapshot = {c.key: self.__dict__[c.key] for c in self.__class__.__table__.columns if c.key in self.__dict__} - if not no_refresh: - await db_session.refresh(self) - return self - except StaleDataError as e: - # This can occur when using optimistic locking (version_id_col) and: - # 1. The row doesn't exist (0 rows matched) - # 2. The version has changed (concurrent update) - # In practice, case 1 is rare (blocks aren't frequently deleted), so we always - # return 409 ConcurrentUpdateError. If it was actually deleted, the retry will get 404. - # Not worth performing another db query to check if the row exists. 
- raise ConcurrentUpdateError(resource_type=class_name, resource_id=object_id) from e - except (DBAPIError, IntegrityError) as e: - self._handle_dbapi_error(e) + for attempt in range(_DEADLOCK_MAX_RETRIES): + try: + db_session.add(self) + if no_commit: + await db_session.flush() + else: + await db_session.commit() + + if not no_refresh: + await db_session.refresh(self) + return self + except StaleDataError as e: + raise ConcurrentUpdateError(resource_type=class_name, resource_id=object_id) from e + except (DBAPIError, IntegrityError) as e: + if _is_deadlock_error(e) and attempt < _DEADLOCK_MAX_RETRIES - 1: + logger.warning( + f"Deadlock detected in {class_name}.update_async (attempt {attempt + 1}/{_DEADLOCK_MAX_RETRIES}), retrying..." + ) + await db_session.rollback() + for key, value in _col_snapshot.items(): + setattr(self, key, value) + await asyncio.sleep(_DEADLOCK_BASE_DELAY * (2**attempt)) + continue + self._handle_dbapi_error(e) @classmethod def _size_preprocess( @@ -810,6 +901,18 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base): logger.error(f"Query canceled (statement timeout) for {cls.__name__}: {e}") raise DatabaseTimeoutError(message=f"Query canceled due to statement timeout for {cls.__name__}.", original_exception=e) from e + if isinstance(orig, DeadlockDetectedError): + logger.error(f"Deadlock detected for {cls.__name__}: {e}") + raise DatabaseDeadlockError(message=f"A database deadlock was detected for {cls.__name__}.", original_exception=e) from e + + # Handle asyncpg LockNotAvailableError (wrapped in DBAPIError) + # This occurs when a SELECT ... FOR UPDATE NOWAIT or similar fails to acquire a lock + if isinstance(orig, AsyncpgLockNotAvailableError): + logger.warning(f"Lock not available for {cls.__name__}: {e}") + raise DatabaseLockNotAvailableError( + message=f"Could not acquire lock for {cls.__name__}. 
Another operation is in progress.", original_exception=e + ) from e + # Handle SQLite-specific errors if "UNIQUE constraint failed" in error_message: raise UniqueConstraintViolationError( @@ -844,6 +947,18 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base): f"A foreign key constraint was violated for {cls.__name__}. Check your input for missing or invalid references: {e}" ) from e + # Handle deadlock detected + if error_code == "40P01": + logger.error(f"Deadlock detected for {cls.__name__}: {e}") + raise DatabaseDeadlockError(message=f"A database deadlock was detected for {cls.__name__}.", original_exception=e) from e + + # Handle lock not available (e.g. NOWAIT or lock_timeout exceeded) + if error_code == "55P03": + logger.warning(f"Lock not available for {cls.__name__}: {e}") + raise DatabaseLockNotAvailableError( + message=f"Could not acquire lock for {cls.__name__}. Another operation is in progress.", original_exception=e + ) from e + # Re-raise for other unhandled DBAPI errors raise diff --git a/letta/orm/step.py b/letta/orm/step.py index 13d6d552..64f0353a 100644 --- a/letta/orm/step.py +++ b/letta/orm/step.py @@ -43,6 +43,9 @@ class Step(SqlalchemyBase, ProjectMixin): provider_name: Mapped[Optional[str]] = mapped_column(None, nullable=True, doc="The name of the provider used for this step.") provider_category: Mapped[Optional[str]] = mapped_column(None, nullable=True, doc="The category of the provider used for this step.") model: Mapped[Optional[str]] = mapped_column(None, nullable=True, doc="The name of the model used for this step.") + model_handle: Mapped[Optional[str]] = mapped_column( + None, nullable=True, doc="The model handle (e.g., 'openai/gpt-4o-mini') used for this step." + ) model_endpoint: Mapped[Optional[str]] = mapped_column(None, nullable=True, doc="The model endpoint url used for this step.") context_window_limit: Mapped[Optional[int]] = mapped_column( None, nullable=True, doc="The context window limit configured for this step." 
@@ -50,6 +53,15 @@ class Step(SqlalchemyBase, ProjectMixin): completion_tokens: Mapped[int] = mapped_column(default=0, doc="Number of tokens generated by the agent") prompt_tokens: Mapped[int] = mapped_column(default=0, doc="Number of tokens in the prompt") total_tokens: Mapped[int] = mapped_column(default=0, doc="Total number of tokens processed by the agent") + cached_input_tokens: Mapped[Optional[int]] = mapped_column( + None, nullable=True, doc="Number of input tokens served from cache. None if not reported by provider." + ) + cache_write_tokens: Mapped[Optional[int]] = mapped_column( + None, nullable=True, doc="Number of input tokens written to cache (Anthropic only). None if not reported by provider." + ) + reasoning_tokens: Mapped[Optional[int]] = mapped_column( + None, nullable=True, doc="Number of reasoning/thinking tokens generated. None if not reported by provider." + ) completion_tokens_details: Mapped[Optional[Dict]] = mapped_column( JSON, nullable=True, doc="Detailed completion token breakdown (e.g., reasoning_tokens)." 
) diff --git a/letta/otel/tracing.py b/letta/otel/tracing.py index a2b8c86a..f0f5f490 100644 --- a/letta/otel/tracing.py +++ b/letta/otel/tracing.py @@ -4,7 +4,6 @@ import itertools import json import re import time -import traceback from functools import wraps from typing import Any, Dict, List, Optional @@ -340,7 +339,7 @@ def trace_method(func): try: # Test if str() works (some objects have broken __str__) try: - test_str = str(value) + str(value) # If str() works and is reasonable, use repr str_value = repr(value) except Exception: diff --git a/letta/plugins/defaults.py b/letta/plugins/defaults.py index f3032ad2..7d105b6b 100644 --- a/letta/plugins/defaults.py +++ b/letta/plugins/defaults.py @@ -1,6 +1,3 @@ -from letta.settings import settings - - def is_experimental_enabled(feature_name: str, **kwargs) -> bool: # if feature_name in ("async_agent_loop", "summarize"): # if not (kwargs.get("eligibility", False) and settings.use_experimental): diff --git a/letta/prompts/prompt_generator.py b/letta/prompts/prompt_generator.py index 90c4a665..e3cedcac 100644 --- a/letta/prompts/prompt_generator.py +++ b/letta/prompts/prompt_generator.py @@ -12,6 +12,13 @@ from letta.otel.tracing import trace_method from letta.schemas.memory import Memory +class PreserveMapping(dict): + """Used to preserve (do not modify) undefined variables in the system prompt""" + + def __missing__(self, key): + return "{" + key + "}" + + class PromptGenerator: # TODO: This code is kind of wonky and deserves a rewrite @trace_method @@ -32,7 +39,7 @@ class PromptGenerator: is available through its tools. 
Args: - memory_edit_timestamp: When memory blocks were last modified + memory_edit_timestamp: When the system prompt was last recompiled timezone: The timezone to use for formatting timestamps (e.g., 'America/Los_Angeles') previous_message_count: Number of messages in recall memory (conversation history) archival_memory_size: Number of items in archival memory (long-term storage) @@ -44,7 +51,7 @@ class PromptGenerator: Example Output: - The current time is: 2024-01-15 10:30 AM PST - - Memory blocks were last modified: 2024-01-15 09:00 AM PST + - System prompt last recompiled: 2024-01-15 09:00 AM PST - 42 previous messages between you and the user are stored in recall memory (use tools to access them) - 156 total memories you created are stored in archival memory (use tools to access them) - Available archival memory tags: project_x, meeting_notes, research, ideas @@ -57,7 +64,7 @@ class PromptGenerator: metadata_lines = [ "", f"- The current system date is: {get_local_time_fast(timezone)}", - f"- Memory blocks were last modified: {timestamp_str}", + f"- System prompt last recompiled: {timestamp_str}", f"- {previous_message_count} previous messages between you and the user are stored in recall memory (use tools to access them)", ] diff --git a/letta/prompts/summarizer_prompt.py b/letta/prompts/summarizer_prompt.py index 62d6746f..0058c4f7 100644 --- a/letta/prompts/summarizer_prompt.py +++ b/letta/prompts/summarizer_prompt.py @@ -1,3 +1,90 @@ +ALL_WORD_LIMIT = 500 +SLIDING_WORD_LIMIT = 300 + +ALL_PROMPT = f"""Your task is to create a detailed summary of the conversation so far, paying close attention to the user's explicit requests and your previous actions. +This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing development work without losing context. Your summary should include the following sections: + +1.**High level goals**: What is the high level goal and ongoing task? 
Capture the user's explicit requests and intent in detail. If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress. + +2. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress? If there is a previous summary being evicted, please extract a concise version of the critical info from it. + +3. **Important details**: Enumerate specific files and code sections examined, modified, or created with a summary of why this file read or edit is important. Include specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later. + +4. **Errors and fixes**: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received and record verbatim if useful. + +5. **Current state**: Describe in detail precisely what is currently being worked on, paying special attention to the most recent messages from both user and assistant. Include file names and code snippets where applicable. + +6. **Optional Next Step**: List the next step that you will take that is related to the most recent work you were doing. IMPORTANT: ensure that this step is DIRECTLY in line with the user's most recent explicit requests and the most current task. If your last task was concluded, then only list next steps if they are explicitly in line with the user's request. If there is a next step, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off. + +7. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later. + +Write in first person as a factual record of what occurred.
Be concise but thorough - the goal is to preserve enough context that the recent messages make sense and important information isn't lost to prevent duplicate work or repeated mistakes. + +Keep your summary under {ALL_WORD_LIMIT} words. Only output the summary.""" + +SLIDING_PROMPT = f"""The following messages are being evicted from the BEGINNING of your context window. Write a detailed summary that captures what happened in these messages to appear BEFORE the remaining recent messages in context, providing background for what comes after. Include the following sections: + +1.**High level goals**: What is the high level goal and ongoing task? Capture the user's explicit requests and intent in detail. If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress. + +2. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress? If there is a previous summary being evicted, please extract a concise version of the critical info from it. + +3. **Important details**: Enumerate specific files and code sections examined, modified, or created with a summary of why this file read or edit is important. Include specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later. + +4. **Errors and fixes**: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received and record verbatim if useful. + +5. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later. + +Write in first person as a factual record of what occurred. 
Be thorough and detailed - the goal is to preserve enough context that the recent messages make sense and important information isn't lost to prevent duplicate work or repeated mistakes. + +Keep your summary under {SLIDING_WORD_LIMIT} words. Only output the summary.""" + + +SELF_SLIDING_PROMPT = f"""The previous messages are being evicted from the BEGINNING of your context window. Write a detailed summary that captures what happened in these messages to appear BEFORE the remaining recent messages in context, providing background for what comes after. Do NOT continue the conversation. Do NOT respond to any questions in the messages. Do NOT call any tools. Pay close attention to the user's explicit requests and your previous actions. + +You MUST include the following sections: + +1.**High level goals**: What is the high level goal and ongoing task? Capture the user's explicit requests and intent in detail. If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress. + +2. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress? If there is a previous summary being evicted, please extract a concise version of the critical info from it. + +3. **Important details**: Enumerate specific files and code sections examined, modified, or created with a summary of why this file read or edit is important. Include specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later. + +4. **Errors and fixes**: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received and record verbatim if useful. + +5. 
**Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later. + +Write in first person as a factual record of what occurred. Be thorough and detailed - the goal is to preserve enough context that the recent messages make sense and important information isn't lost to prevent duplicate work or repeated mistakes. + +Keep your summary under {SLIDING_WORD_LIMIT} words. IMPORTANT: Do NOT use any tools. Do NOT continue the conversation. You MUST respond with ONLY the summary as text output. Generate the summary with each section as mentioned: +""" + + +SELF_ALL_PROMPT = f"""Your task is to create a detailed summary of the conversation so far. Do NOT continue the conversation. Do NOT respond to any questions in the messages. Do NOT call any tools. Pay close attention to the user's explicit requests and your previous actions. This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing development work without losing context. + +You MUST include the following sections: + +1.**High level goals**: What is the high level goal and ongoing task? Capture the user's explicit requests and intent in detail. If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress. + +2. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress? If there is a previous summary being evicted, please extract a concise version of the critical info from it. + +3. **Important details**: Enumerate specific files and code sections examined, modified, or created with a summary of why this file read or edit is important. 
Include specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later. + +4. **Errors and fixes**: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received and record verbatim if useful. + +5. **Current state**: Describe in detail precisely what is currently being worked on, paying special attention to the most recent messages from both user and assistant. Include file names and code snippets where applicable. + +6. **Optional Next Step**: List the next step that you will take that is related to the most recent work you were doing. IMPORTANT: ensure that this step is DIRECTLY in line with the user's most recent explicit requests and the most current task. If your last task was concluded, then only list next steps if they are explicitly in line with the user's request. If there is a next step, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off. + +7. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later. + +Write in first person as a factual record of what occurred. Be concise but thorough - the goal is to preserve enough context that the recent messages make sense and important information isn't lost to prevent duplicate work or repeated mistakes. + +Keep your summary under {ALL_WORD_LIMIT} words. + +IMPORTANT: Do NOT use any tools. Do NOT continue the conversation. You MUST respond with ONLY the summary as text output. Generate the summary with each section as mentioned: +""" + + +ANTHROPIC_SUMMARY_PROMPT = """You have been working on the task described above but have not yet completed it.
Write a continuation summary that will allow you (or another instance of yourself) to resume work efficiently in a future context window where the conversation history will be replaced with this summary. Your summary should be structured, concise, and actionable. Include: 1. Task Overview @@ -30,7 +117,6 @@ Write the summary from the perspective of the AI (use the first person from the Only output the summary, do NOT include anything else in your output. """ -WORD_LIMIT = 250 SHORTER_SUMMARY_PROMPT = f"""The following messages are being evicted from your context window. Write a detailed summary that captures what happened in these messages. This summary will appear BEFORE the remaining recent messages in context, providing background for what comes after. Include: @@ -45,4 +131,104 @@ This summary will appear BEFORE the remaining recent messages in context, provid Write in first person as a factual record of what occurred. Be thorough and detailed - the goal is to preserve enough context that the recent messages make sense and important information isn't lost. -Keep your summary under {WORD_LIMIT} words. Only output the summary.""" +Keep your summary under {SLIDING_WORD_LIMIT} words. Only output the summary.""" + +SELF_SUMMARIZATION_PROMPT = """Your task is to create a detailed summary of the conversation so far, paying close attention to the user's explicit requests and your previous actions. +This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing development work without losing context. + +Before providing your final summary, wrap your analysis in tags to organize your thoughts and ensure you've covered all necessary points. In your analysis process: + +1. Chronologically analyze each message and section of the conversation. 
For each section thoroughly identify: + - The user's explicit requests and intents + - Your approach to addressing the user's requests + - Key decisions, technical concepts and code patterns + - Specific details like: + - file names + - full code snippets + - function signatures + - file edits + - Errors that you ran into and how you fixed them + - Pay special attention to specific user feedback that you received, especially if the user told you to do something differently. +2. Double-check for technical accuracy and completeness, addressing each required element thoroughly. + +Your summary should include the following sections: + +1. Primary Request and Intent: Capture all of the user's explicit requests and intents in detail +2. Key Technical Concepts: List all important technical concepts, technologies, and frameworks discussed. +3. Files and Code Sections: Enumerate specific files and code sections examined, modified, or created. Pay special attention to the most recent messages and include full code snippets where applicable and include a summary of why this file read or edit is important. +4. Errors and fixes: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received, especially if the user told you to do something differently. +5. Problem Solving: Document problems solved and any ongoing troubleshooting efforts. +6. All user messages: List ALL user messages that are not tool results. These are critical for understanding the users' feedback and changing intent. +7. Pending Tasks: Outline any pending tasks that you have explicitly been asked to work on. +8. Current Work: Describe in detail precisely what was being worked on immediately before this summary request, paying special attention to the most recent messages from both user and assistant. Include file names and code snippets where applicable. +9.
Optional Next Step: List the next step that you will take that is related to the most recent work you were doing. IMPORTANT: ensure that this step is DIRECTLY in line with the user's most recent explicit requests, and the task you were working on immediately before this summary request. If your last task was concluded, then only list next steps if they are explicitly in line with the users request. Do not start on tangential requests or really old requests that were already completed without confirming with the user first. + If there is a next step, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off. This should be verbatim to ensure there's no drift in task interpretation. + +Here's an example of how your output should be structured: + + + +[Your thought process, ensuring all points are covered thoroughly and accurately] + + + +1. Primary Request and Intent: + [Detailed description] + +2. Key Technical Concepts: + - [Concept 1] + - [Concept 2] + - [...] + +3. Files and Code Sections: + - [File Name 1] + - [Summary of why this file is important] + - [Summary of the changes made to this file, if any] + - [Important Code Snippet] + - [File Name 2] + - [Important Code Snippet] + - [...] + +4. Errors and fixes: + - [Detailed description of error 1]: + - [How you fixed the error] + - [User feedback on the error if any] + - [...] + +5. Problem Solving: + [Description of solved problems and ongoing troubleshooting] + +6. All user messages: + - [Detailed non tool use user message] + - [...] + +7. Pending Tasks: + - [Task 1] + - [Task 2] + - [...] + +8. Current Work: + [Precise description of current work] + +9. Optional Next Step: + [Optional Next step to take] + + + + +Please provide your summary based on the conversation so far, following this structure and ensuring precision and thoroughness in your response. + +There may be additional summarization instructions provided in the included context. 
If so, remember to follow these instructions when creating the above summary. Examples of instructions include: + +## Compact Instructions +When summarizing the conversation focus on typescript code changes and also remember the mistakes you made and how you fixed them. + + + +# Summary instructions +When you are using compact - please focus on test output and code changes. Include file reads verbatim. + + + +IMPORTANT: Do NOT use any tools. You MUST respond with ONLY the ... block as your text output. +""" diff --git a/letta/schemas/agent.py b/letta/schemas/agent.py index 64cb313b..666aaf60 100644 --- a/letta/schemas/agent.py +++ b/letta/schemas/agent.py @@ -4,13 +4,16 @@ from typing import Dict, List, Literal, Optional from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator -from letta.constants import CORE_MEMORY_LINE_NUMBER_WARNING, DEFAULT_EMBEDDING_CHUNK_SIZE +from letta.constants import ( + DEFAULT_EMBEDDING_CHUNK_SIZE, + MAX_FILES_OPEN_LIMIT, + MAX_PER_FILE_VIEW_WINDOW_CHAR_LIMIT, +) from letta.errors import AgentExportProcessingError, LettaInvalidArgumentError from letta.schemas.block import Block, CreateBlock from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import PrimitiveType from letta.schemas.environment_variables import AgentEnvironmentVariable -from letta.schemas.file import FileStatus from letta.schemas.group import Group from letta.schemas.identity import Identity from letta.schemas.letta_base import OrmMetadataBase @@ -27,6 +30,7 @@ from letta.schemas.tool import Tool from letta.schemas.tool_rule import ToolRule from letta.services.summarizer.summarizer_config import CompactionSettings from letta.utils import calculate_file_defaults_based_on_context_window, create_random_username +from letta.validators import BlockId, IdentityId, MessageId, SourceId, ToolId # TODO: Remove this upon next OSS release, there's a duplicate AgentType in enums @@ -210,12 +214,12 @@ class 
CreateAgent(BaseModel, validate_assignment=True): # ) # TODO: This is a legacy field and should be removed ASAP to force `tool_ids` usage tools: Optional[List[str]] = Field(None, description="The tools used by the agent.") - tool_ids: Optional[List[str]] = Field(None, description="The ids of the tools used by the agent.") - source_ids: Optional[List[str]] = Field( + tool_ids: Optional[List[ToolId]] = Field(None, description="The ids of the tools used by the agent.") + source_ids: Optional[List[SourceId]] = Field( None, description="Deprecated: Use `folder_ids` field instead. The ids of the sources used by the agent.", deprecated=True ) - folder_ids: Optional[List[str]] = Field(None, description="The ids of the folders used by the agent.") - block_ids: Optional[List[str]] = Field(None, description="The ids of the blocks used by the agent.") + folder_ids: Optional[List[SourceId]] = Field(None, description="The ids of the folders used by the agent.") + block_ids: Optional[List[BlockId]] = Field(None, description="The ids of the blocks used by the agent.") tool_rules: Optional[List[ToolRule]] = Field(None, description="The tool rules governing the agent.") tags: Optional[List[str]] = Field(None, description="The tags associated with the agent.") system: Optional[str] = Field(None, description="The system prompt used by the agent.") @@ -306,7 +310,7 @@ class CreateAgent(BaseModel, validate_assignment=True): # base_template_id: Optional[str] = Field( None, description="Deprecated: No longer used. 
The base template id of the agent.", deprecated=True ) - identity_ids: Optional[List[str]] = Field(None, description="The ids of the identities associated with this agent.") + identity_ids: Optional[List[IdentityId]] = Field(None, description="The ids of the identities associated with this agent.") message_buffer_autoclear: bool = Field( False, description="If set to True, the agent will not remember previous messages (though the agent will still retain state via core memory blocks and archival/recall memory). Not recommended unless you have an advanced use case.", @@ -394,6 +398,28 @@ class CreateAgent(BaseModel, validate_assignment=True): # return embedding + @field_validator("max_files_open") + @classmethod + def validate_max_files_open(cls, value: Optional[int]) -> Optional[int]: + """Validate max_files_open is within acceptable range.""" + if value is not None and value > MAX_FILES_OPEN_LIMIT: + raise LettaInvalidArgumentError( + f"max_files_open cannot exceed {MAX_FILES_OPEN_LIMIT}. Got: {value}", + argument_name="max_files_open", + ) + return value + + @field_validator("per_file_view_window_char_limit") + @classmethod + def validate_per_file_view_window_char_limit(cls, value: Optional[int]) -> Optional[int]: + """Validate per_file_view_window_char_limit is within int32 range for database compatibility.""" + if value is not None and value > MAX_PER_FILE_VIEW_WINDOW_CHAR_LIMIT: + raise LettaInvalidArgumentError( + f"per_file_view_window_char_limit cannot exceed {MAX_PER_FILE_VIEW_WINDOW_CHAR_LIMIT}. 
Got: {value}", + argument_name="per_file_view_window_char_limit", + ) + return value + @model_validator(mode="after") def validate_sleeptime_for_agent_type(self) -> "CreateAgent": """Validate that enable_sleeptime is True when agent_type is a specific value""" @@ -417,16 +443,16 @@ class InternalTemplateAgentCreate(CreateAgent): class UpdateAgent(BaseModel): name: Optional[str] = Field(None, description="The name of the agent.") - tool_ids: Optional[List[str]] = Field(None, description="The ids of the tools used by the agent.") - source_ids: Optional[List[str]] = Field( + tool_ids: Optional[List[ToolId]] = Field(None, description="The ids of the tools used by the agent.") + source_ids: Optional[List[SourceId]] = Field( None, description="Deprecated: Use `folder_ids` field instead. The ids of the sources used by the agent.", deprecated=True ) - folder_ids: Optional[List[str]] = Field(None, description="The ids of the folders used by the agent.") - block_ids: Optional[List[str]] = Field(None, description="The ids of the blocks used by the agent.") + folder_ids: Optional[List[SourceId]] = Field(None, description="The ids of the folders used by the agent.") + block_ids: Optional[List[BlockId]] = Field(None, description="The ids of the blocks used by the agent.") tags: Optional[List[str]] = Field(None, description="The tags associated with the agent.") system: Optional[str] = Field(None, description="The system prompt used by the agent.") tool_rules: Optional[List[ToolRule]] = Field(None, description="The tool rules governing the agent.") - message_ids: Optional[List[str]] = Field(None, description="The ids of the messages in the agent's in-context memory.") + message_ids: Optional[List[MessageId]] = Field(None, description="The ids of the messages in the agent's in-context memory.") description: Optional[str] = Field(None, description="The description of the agent.") metadata: Optional[Dict] = Field(None, description="The metadata of the agent.") 
tool_exec_environment_variables: Optional[Dict[str, str]] = Field(None, description="Deprecated: use `secrets` field instead") @@ -434,7 +460,7 @@ class UpdateAgent(BaseModel): project_id: Optional[str] = Field(None, description="The id of the project the agent belongs to.") template_id: Optional[str] = Field(None, description="The id of the template the agent belongs to.") base_template_id: Optional[str] = Field(None, description="The base template id of the agent.") - identity_ids: Optional[List[str]] = Field(None, description="The ids of the identities associated with this agent.") + identity_ids: Optional[List[IdentityId]] = Field(None, description="The ids of the identities associated with this agent.") message_buffer_autoclear: Optional[bool] = Field( None, description="If set to True, the agent will not remember previous messages (though the agent will still retain state via core memory blocks and archival/recall memory). Not recommended unless you have an advanced use case.", @@ -497,6 +523,28 @@ class UpdateAgent(BaseModel): model_config = ConfigDict(extra="ignore") # Ignores extra fields + @field_validator("max_files_open") + @classmethod + def validate_max_files_open(cls, value: Optional[int]) -> Optional[int]: + """Validate max_files_open is within acceptable range.""" + if value is not None and value > MAX_FILES_OPEN_LIMIT: + raise LettaInvalidArgumentError( + f"max_files_open cannot exceed {MAX_FILES_OPEN_LIMIT}. Got: {value}", + argument_name="max_files_open", + ) + return value + + @field_validator("per_file_view_window_char_limit") + @classmethod + def validate_per_file_view_window_char_limit(cls, value: Optional[int]) -> Optional[int]: + """Validate per_file_view_window_char_limit is within int32 range for database compatibility.""" + if value is not None and value > MAX_PER_FILE_VIEW_WINDOW_CHAR_LIMIT: + raise LettaInvalidArgumentError( + f"per_file_view_window_char_limit cannot exceed {MAX_PER_FILE_VIEW_WINDOW_CHAR_LIMIT}. 
Got: {value}", + argument_name="per_file_view_window_char_limit", + ) + return value + class AgentStepResponse(BaseModel): messages: List[Message] = Field(..., description="The messages generated during the agent's step.") diff --git a/letta/schemas/agent_file.py b/letta/schemas/agent_file.py index ccbdd7f9..129b12f1 100644 --- a/letta/schemas/agent_file.py +++ b/letta/schemas/agent_file.py @@ -1,15 +1,21 @@ from datetime import datetime -from typing import Any, Dict, List, Optional +from typing import Annotated, Any, Dict, List, Literal, Optional, Union from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, model_validator from letta.helpers.datetime_helpers import get_utc_time from letta.schemas.agent import AgentState, CreateAgent from letta.schemas.block import Block, CreateBlock from letta.schemas.enums import MessageRole, PrimitiveType from letta.schemas.file import FileAgent, FileAgentBase, FileMetadata, FileMetadataBase -from letta.schemas.group import Group, GroupCreate +from letta.schemas.group import ( + Group, + GroupCreate, + ManagerConfig, + ManagerType, + RoundRobinManager, +) from letta.schemas.letta_message import ApprovalReturn from letta.schemas.mcp import MCPServer from letta.schemas.message import Message, MessageCreate, ToolReturn @@ -129,6 +135,12 @@ class AgentSchema(CreateAgent): files_agents: List[FileAgentSchema] = Field(default_factory=list, description="List of file-agent relationships for this agent") group_ids: List[str] = Field(default_factory=list, description="List of groups that the agent manages") + tool_ids: Optional[List[str]] = Field(None, description="The ids of the tools used by the agent.") + source_ids: Optional[List[str]] = Field(None, description="The ids of the sources used by the agent.") + folder_ids: Optional[List[str]] = Field(None, description="The ids of the folders used by the 
agent.") + block_ids: Optional[List[str]] = Field(None, description="The ids of the blocks used by the agent.") + identity_ids: Optional[List[str]] = Field(None, description="The ids of the identities associated with this agent.") + @classmethod async def from_agent_state( cls, agent_state: AgentState, message_manager: MessageManager, files_agents: List[FileAgent], actor: User @@ -179,7 +191,14 @@ class AgentSchema(CreateAgent): per_file_view_window_char_limit=agent_state.per_file_view_window_char_limit, ) - messages = await message_manager.list_messages(agent_id=agent_state.id, actor=actor, limit=50) # TODO: Expand to get more messages + # If agent_state.message_ids is set (e.g., from conversation export), fetch those specific messages + # Otherwise fall back to listing messages by agent_id + if agent_state.message_ids: + messages = await message_manager.get_messages_by_ids_async(message_ids=agent_state.message_ids, actor=actor) + else: + messages = await message_manager.list_messages( + agent_id=agent_state.id, actor=actor, limit=50 + ) # TODO: Expand to get more messages # Convert messages to MessageSchema objects message_schemas = [MessageSchema.from_message(msg) for msg in messages] @@ -195,12 +214,52 @@ class AgentSchema(CreateAgent): ) +# Agentfile-specific manager configs that use plain str instead of validated AgentId +# These allow importing agentfiles with simple IDs like "agent-0" + + +class SupervisorManagerSchema(ManagerConfig): + manager_type: Literal[ManagerType.supervisor] = Field(ManagerType.supervisor, description="") + manager_agent_id: str = Field(..., description="") + + +class DynamicManagerSchema(ManagerConfig): + manager_type: Literal[ManagerType.dynamic] = Field(ManagerType.dynamic, description="") + manager_agent_id: str = Field(..., description="") + termination_token: Optional[str] = Field("DONE!", description="") + max_turns: Optional[int] = Field(None, description="") + + +class SleeptimeManagerSchema(ManagerConfig): + manager_type: 
Literal[ManagerType.sleeptime] = Field(ManagerType.sleeptime, description="") + manager_agent_id: str = Field(..., description="") + sleeptime_agent_frequency: Optional[int] = Field(None, description="") + + +class VoiceSleeptimeManagerSchema(ManagerConfig): + manager_type: Literal[ManagerType.voice_sleeptime] = Field(ManagerType.voice_sleeptime, description="") + manager_agent_id: str = Field(..., description="") + max_message_buffer_length: Optional[int] = Field(None, description="") + min_message_buffer_length: Optional[int] = Field(None, description="") + + +ManagerConfigSchemaUnion = Annotated[ + Union[RoundRobinManager, SupervisorManagerSchema, DynamicManagerSchema, SleeptimeManagerSchema, VoiceSleeptimeManagerSchema], + Field(discriminator="manager_type"), +] + + class GroupSchema(GroupCreate): """Group with human-readable ID for agent file""" __id_prefix__ = PrimitiveType.GROUP.value id: str = Field(..., description="Human-readable identifier for this group in the file") + # Override validated ID fields from GroupCreate to accept simple IDs like "agent-0" + agent_ids: List[str] = Field(..., description="List of agent IDs in this group") + shared_block_ids: List[str] = Field([], description="List of shared block IDs") + manager_config: ManagerConfigSchemaUnion = Field(RoundRobinManager(), description="") + @classmethod def from_group(cls, group: Group) -> "GroupSchema": """Convert Group to GroupSchema""" @@ -308,6 +367,37 @@ class ToolSchema(Tool): return cls(**tool.model_dump()) +class SkillSchema(BaseModel): + """Skill schema for agent files. + + Skills are folders of instructions, scripts, and resources that agents can load. 
+ Either files (with SKILL.md) or source_url must be provided: + - files with SKILL.md: inline skill content + - source_url: reference to resolve later (e.g., 'letta:slack') + - both: inline content with provenance tracking + """ + + name: str = Field(..., description="Skill name, also serves as unique identifier (e.g., 'slack', 'pdf')") + files: Optional[Dict[str, str]] = Field( + default=None, + description="Skill files as path -> content mapping. Must include 'SKILL.md' key if provided.", + ) + source_url: Optional[str] = Field( + default=None, + description="Source URL for skill resolution (e.g., 'letta:slack', 'anthropic:pdf', 'owner/repo/path')", + ) + + @model_validator(mode="after") + def check_files_or_source_url(self) -> "SkillSchema": + """Ensure either files (with SKILL.md) or source_url is provided.""" + has_files = self.files and "SKILL.md" in self.files + has_source_url = self.source_url is not None + + if not has_files and not has_source_url: + raise ValueError("Either files (with 'SKILL.md') or source_url must be provided") + return self + + class MCPServerSchema(BaseModel): """MCP server schema for agent files with remapped ID.""" @@ -348,6 +438,7 @@ class AgentFileSchema(BaseModel): sources: List[SourceSchema] = Field(..., description="List of sources in this agent file") tools: List[ToolSchema] = Field(..., description="List of tools in this agent file") mcp_servers: List[MCPServerSchema] = Field(..., description="List of MCP servers in this agent file") + skills: List[SkillSchema] = Field(default_factory=list, description="List of skills in this agent file") metadata: Dict[str, str] = Field( default_factory=dict, description="Metadata for this agent file, including revision_id and other export information." 
) diff --git a/letta/schemas/block.py b/letta/schemas/block.py index 3103c1a9..48734db0 100644 --- a/letta/schemas/block.py +++ b/letta/schemas/block.py @@ -1,12 +1,14 @@ from datetime import datetime from typing import Any, List, Optional -from pydantic import ConfigDict, Field, model_validator +from pydantic import ConfigDict, Field, field_validator, model_validator from letta.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT, DEFAULT_HUMAN_BLOCK_DESCRIPTION, DEFAULT_PERSONA_BLOCK_DESCRIPTION from letta.schemas.enums import PrimitiveType from letta.schemas.letta_base import LettaBase +INT32_MAX = 2147483647 + # block of the LLM context @@ -48,6 +50,22 @@ class BaseBlock(LettaBase, validate_assignment=True): model_config = ConfigDict(extra="ignore") # Ignores extra fields + @field_validator("limit", mode="after") + @classmethod + def validate_limit_int32(cls, v: int) -> int: + """Ensure limit is within PostgreSQL INTEGER (int32) range.""" + if v > INT32_MAX: + raise ValueError(f"limit must be <= {INT32_MAX} (int32 max), got {v}") + return v + + @field_validator("value", mode="before") + @classmethod + def sanitize_value_null_bytes(cls, v): + """Remove null bytes from value to prevent PostgreSQL encoding errors.""" + if isinstance(v, str): + return v.replace("\x00", "") + return v + @model_validator(mode="before") @classmethod def verify_char_limit(cls, data: Any) -> Any: diff --git a/letta/schemas/conversation.py b/letta/schemas/conversation.py index c2a94007..0f6bf7f8 100644 --- a/letta/schemas/conversation.py +++ b/letta/schemas/conversation.py @@ -1,8 +1,10 @@ from typing import List, Optional -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, field_validator +from letta.errors import LettaInvalidArgumentError from letta.schemas.letta_base import OrmMetadataBase +from letta.schemas.model import ModelSettingsUnion class Conversation(OrmMetadataBase): @@ -18,6 +20,14 @@ class Conversation(OrmMetadataBase): default_factory=list, 
description="IDs of blocks that are isolated (specific to this conversation, overriding agent defaults).", ) + model: Optional[str] = Field( + None, + description="The model handle for this conversation (overrides agent's model). Format: provider/model-name.", + ) + model_settings: Optional[ModelSettingsUnion] = Field( + None, + description="The model settings for this conversation (overrides agent's model settings).", + ) class CreateConversation(BaseModel): @@ -29,9 +39,49 @@ class CreateConversation(BaseModel): description="List of block labels that should be isolated (conversation-specific) rather than shared across conversations. " "New blocks will be created as copies of the agent's blocks with these labels.", ) + model: Optional[str] = Field( + None, + description="The model handle for this conversation (overrides agent's model). Format: provider/model-name.", + ) + model_settings: Optional[ModelSettingsUnion] = Field( + None, + description="The model settings for this conversation (overrides agent's model settings).", + ) + + @field_validator("model") + @classmethod + def validate_model(cls, model: Optional[str]) -> Optional[str]: + if not model: + return model + if "/" not in model: + raise LettaInvalidArgumentError("The model handle should be in the format provider/model-name", argument_name="model") + provider_name, model_name = model.split("/", 1) + if not provider_name or not model_name: + raise LettaInvalidArgumentError("The model handle should be in the format provider/model-name", argument_name="model") + return model class UpdateConversation(BaseModel): """Request model for updating a conversation.""" summary: Optional[str] = Field(None, description="A summary of the conversation.") + model: Optional[str] = Field( + None, + description="The model handle for this conversation (overrides agent's model). 
Format: provider/model-name.", + ) + model_settings: Optional[ModelSettingsUnion] = Field( + None, + description="The model settings for this conversation (overrides agent's model settings).", + ) + + @field_validator("model") + @classmethod + def validate_model(cls, model: Optional[str]) -> Optional[str]: + if not model: + return model + if "/" not in model: + raise LettaInvalidArgumentError("The model handle should be in the format provider/model-name", argument_name="model") + provider_name, model_name = model.split("/", 1) + if not provider_name or not model_name: + raise LettaInvalidArgumentError("The model handle should be in the format provider/model-name", argument_name="model") + return model diff --git a/letta/schemas/enums.py b/letta/schemas/enums.py index 96efb446..3996a160 100644 --- a/letta/schemas/enums.py +++ b/letta/schemas/enums.py @@ -96,6 +96,14 @@ class ProviderCategory(str, Enum): byok = "byok" +class LLMCallType(str, Enum): + """Type of LLM call for telemetry tracking.""" + + agent_step = "agent_step" + summarization = "summarization" + tool_generation = "tool_generation" + + class MessageRole(str, Enum): assistant = "assistant" user = "user" @@ -103,6 +111,7 @@ class MessageRole(str, Enum): function = "function" system = "system" approval = "approval" + summary = "summary" class MessageSourceType(str, Enum): diff --git a/letta/schemas/letta_message.py b/letta/schemas/letta_message.py index b90628dd..1482e13c 100644 --- a/letta/schemas/letta_message.py +++ b/letta/schemas/letta_message.py @@ -1,7 +1,7 @@ import json from datetime import datetime, timezone from enum import Enum -from typing import Annotated, List, Literal, Optional, Union +from typing import Annotated, ClassVar, List, Literal, Optional, Union from pydantic import BaseModel, Field, field_serializer, field_validator @@ -61,6 +61,8 @@ class MessageType(str, Enum): tool_return_message = "tool_return_message" approval_request_message = "approval_request_message" 
approval_response_message = "approval_response_message" + summary_message = "summary_message" + event_message = "event_message" class LettaMessage(BaseModel): @@ -244,7 +246,7 @@ class ToolCallMessage(LettaMessage): return data class Config: - json_encoders = { + json_encoders: ClassVar[dict] = { ToolCallDelta: lambda v: v.model_dump(exclude_none=True), ToolCall: lambda v: v.model_dump(exclude_none=True), } @@ -394,13 +396,50 @@ class LettaErrorMessage(BaseModel): seq_id: Optional[int] = None +class CompactionStats(BaseModel): + """ + Statistics about a memory compaction operation. + """ + + trigger: str = Field(..., description="What triggered the compaction (e.g., 'context_window_exceeded', 'post_step_context_check')") + context_tokens_before: Optional[int] = Field( + None, description="Token count before compaction (from LLM usage stats, includes full context sent to LLM)" + ) + context_tokens_after: Optional[int] = Field( + None, description="Token count after compaction (message tokens only, does not include tool definitions)" + ) + context_window: int = Field(..., description="The model's context window size") + messages_count_before: int = Field(..., description="Number of messages before compaction") + messages_count_after: int = Field(..., description="Number of messages after compaction") + + +def extract_compaction_stats_from_packed_json(text_content: str) -> Optional[CompactionStats]: + """ + Extract CompactionStats from a packed summary message JSON string. 
+ + Args: + text_content: The packed JSON string from summary message content + + Returns: + CompactionStats if found and valid, None otherwise + """ + try: + packed_json = json.loads(text_content) + if isinstance(packed_json, dict) and "compaction_stats" in packed_json: + return CompactionStats(**packed_json["compaction_stats"]) + except (json.JSONDecodeError, TypeError, ValueError): + pass + return None + + class SummaryMessage(LettaMessage): """ A message representing a summary of the conversation. Sent to the LLM as a user or system message depending on the provider. """ - message_type: Literal["summary"] = "summary_message" + message_type: Literal["summary_message"] = "summary_message" summary: str + compaction_stats: Optional[CompactionStats] = None class EventMessage(LettaMessage): @@ -408,7 +447,7 @@ class EventMessage(LettaMessage): A message for notifying the developer that an event that has occured (e.g. a compaction). Events are NOT part of the context window. """ - message_type: Literal["event"] = "event_message" + message_type: Literal["event_message"] = "event_message" event_type: Literal["compaction"] event_data: dict @@ -459,8 +498,8 @@ def create_letta_message_union_schema(): "assistant_message": "#/components/schemas/AssistantMessage", "approval_request_message": "#/components/schemas/ApprovalRequestMessage", "approval_response_message": "#/components/schemas/ApprovalResponseMessage", - "summary": "#/components/schemas/SummaryMessage", - "event": "#/components/schemas/EventMessage", + "summary_message": "#/components/schemas/SummaryMessage", + "event_message": "#/components/schemas/EventMessage", }, }, } diff --git a/letta/schemas/letta_message_content.py b/letta/schemas/letta_message_content.py index 7c62ebd3..6ea9c1cd 100644 --- a/letta/schemas/letta_message_content.py +++ b/letta/schemas/letta_message_content.py @@ -1,7 +1,6 @@ from enum import Enum from typing import Annotated, List, Literal, Optional, Union -from openai.types import 
Reasoning from pydantic import BaseModel, Field diff --git a/letta/schemas/letta_request.py b/letta/schemas/letta_request.py index 9290cc38..fcff8c24 100644 --- a/letta/schemas/letta_request.py +++ b/letta/schemas/letta_request.py @@ -73,6 +73,32 @@ class LettaRequest(BaseModel): "This allows sending a message to a different model without changing the agent's configuration.", ) + # Compaction message format + include_compaction_messages: bool = Field( + default=False, + description="If True, compaction events emit structured `SummaryMessage` and `EventMessage` types. " + "If False (default), compaction messages are not included in the response.", + ) + + # Log probabilities for RL training + return_logprobs: bool = Field( + default=False, + description="If True, returns log probabilities of the output tokens in the response. " + "Useful for RL training. Only supported for OpenAI-compatible providers (including SGLang).", + ) + top_logprobs: Optional[int] = Field( + default=None, + description="Number of most likely tokens to return at each position (0-20). " + "Requires return_logprobs=True.", + ) + return_token_ids: bool = Field( + default=False, + description="If True, returns token IDs and logprobs for ALL LLM generations in the agent step, " + "not just the last one. Uses SGLang native /generate endpoint. " + "Returns 'turns' field with TurnTokenData for each assistant/tool turn. 
" + "Required for proper multi-turn RL training with loss masking.", + ) + @field_validator("messages", mode="before") @classmethod def add_default_type_to_messages(cls, v): diff --git a/letta/schemas/letta_response.py b/letta/schemas/letta_response.py index 68ac2dc3..b211d722 100644 --- a/letta/schemas/letta_response.py +++ b/letta/schemas/letta_response.py @@ -2,19 +2,18 @@ import html import json import re from datetime import datetime -from typing import List, Union +from typing import Any, List, Literal, Optional, Union from pydantic import BaseModel, Field, RootModel from letta.helpers.json_helpers import json_dumps -from letta.schemas.enums import JobStatus, MessageStreamStatus +from letta.schemas.enums import JobStatus from letta.schemas.letta_message import ( ApprovalRequestMessage, ApprovalResponseMessage, AssistantMessage, HiddenReasoningMessage, LettaErrorMessage, - LettaMessage, LettaMessageUnion, LettaPing, ReasoningMessage, @@ -25,11 +24,31 @@ from letta.schemas.letta_message import ( ) from letta.schemas.letta_stop_reason import LettaStopReason from letta.schemas.message import Message +from letta.schemas.openai.chat_completion_response import ChoiceLogprobs from letta.schemas.usage import LettaUsageStatistics # TODO: consider moving into own file +class TurnTokenData(BaseModel): + """Token data for a single LLM generation turn in a multi-turn agent interaction. + + Used for RL training to track token IDs and logprobs across all LLM calls, + not just the final one. Tool results are included so the client can tokenize + them with loss_mask=0 (non-trainable). + """ + + role: Literal["assistant", "tool"] = Field( + ..., description="Role of this turn: 'assistant' for LLM generations (trainable), 'tool' for tool results (non-trainable)." + ) + output_ids: Optional[List[int]] = Field(None, description="Token IDs from SGLang native endpoint. 
Only present for assistant turns.") + output_token_logprobs: Optional[List[List[Any]]] = Field( + None, description="Logprobs from SGLang: [[logprob, token_id, top_logprob_or_null], ...]. Only present for assistant turns." + ) + content: Optional[str] = Field(None, description="Text content. For tool turns, client tokenizes this with loss_mask=0.") + tool_name: Optional[str] = Field(None, description="Name of the tool called. Only present for tool turns.") + + class LettaResponse(BaseModel): """ Response object from an agent interaction, consisting of the new messages generated by the agent and usage statistics. @@ -57,6 +76,16 @@ class LettaResponse(BaseModel): ..., description="The usage statistics of the agent.", ) + logprobs: Optional[ChoiceLogprobs] = Field( + None, + description="Log probabilities of the output tokens from the last LLM call. Only present if return_logprobs was enabled.", + ) + turns: Optional[List[TurnTokenData]] = Field( + None, + description="Token data for all LLM generations in multi-turn agent interaction. " + "Includes token IDs and logprobs for each assistant turn, plus tool result content. " + "Only present if return_token_ids was enabled. 
Used for RL training with loss masking.", + ) def __str__(self): return json_dumps( diff --git a/letta/schemas/letta_stop_reason.py b/letta/schemas/letta_stop_reason.py index abb28aa7..81c8ac3e 100644 --- a/letta/schemas/letta_stop_reason.py +++ b/letta/schemas/letta_stop_reason.py @@ -17,6 +17,7 @@ class StopReasonType(str, Enum): no_tool_call = "no_tool_call" tool_rule = "tool_rule" cancelled = "cancelled" + insufficient_credits = "insufficient_credits" requires_approval = "requires_approval" context_window_overflow_in_system_prompt = "context_window_overflow_in_system_prompt" @@ -42,6 +43,8 @@ class StopReasonType(str, Enum): return RunStatus.failed elif self == StopReasonType.cancelled: return RunStatus.cancelled + elif self == StopReasonType.insufficient_credits: + return RunStatus.failed else: raise ValueError("Unknown StopReasonType") diff --git a/letta/schemas/llm_config.py b/letta/schemas/llm_config.py index 4a62b2c1..94f294b5 100644 --- a/letta/schemas/llm_config.py +++ b/letta/schemas/llm_config.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Annotated, Literal, Optional, Union +from typing import TYPE_CHECKING, Literal, Optional from pydantic import BaseModel, ConfigDict, Field, model_validator @@ -82,9 +82,9 @@ class LLMConfig(BaseModel): 0, description="Configurable thinking budget for extended thinking. Used for enable_reasoner and also for Google Vertex models like Gemini 2.5 Flash. Minimum value is 1024 when used with enable_reasoner.", ) - effort: Optional[Literal["low", "medium", "high"]] = Field( + effort: Optional[Literal["low", "medium", "high", "max"]] = Field( None, - description="The effort level for Anthropic Opus 4.5 model (controls token spending). Not setting this gives similar performance to 'high'.", + description="The effort level for Anthropic models that support it (Opus 4.5, Opus 4.6). Controls token spending and thinking behavior. 
Not setting this gives similar performance to 'high'.", ) frequency_penalty: Optional[float] = Field( None, # Can also deafult to 0.0? @@ -112,6 +112,19 @@ class LLMConfig(BaseModel): False, description="Enable strict mode for tool calling. When true, tool schemas include strict: true and additionalProperties: false, guaranteeing tool outputs match JSON schemas.", ) + return_logprobs: bool = Field( + False, + description="Whether to return log probabilities of the output tokens. Useful for RL training.", + ) + top_logprobs: Optional[int] = Field( + None, + description="Number of most likely tokens to return at each position (0-20). Requires return_logprobs=True.", + ) + return_token_ids: bool = Field( + False, + description="Whether to return token IDs for all LLM generations via SGLang native endpoint. " + "Required for multi-turn RL training with loss masking. Only works with SGLang provider.", + ) @model_validator(mode="before") @classmethod @@ -124,13 +137,12 @@ class LLMConfig(BaseModel): if model is None: return values - # Set max_tokens defaults based on model - if values.get("max_tokens") is None: + # Set max_tokens defaults based on model (only if not explicitly provided) + if "max_tokens" not in values: if model.startswith("gpt-5"): # Covers both gpt-5 and gpt-5.1 values["max_tokens"] = 16384 elif model == "gpt-4.1": values["max_tokens"] = 8192 - # For other models, the field default of 4096 will be used # Set context_window defaults if not provided if values.get("context_window") is None: @@ -190,6 +202,7 @@ class LLMConfig(BaseModel): or model.startswith("claude-opus-4") or model.startswith("claude-haiku-4-5") or model.startswith("claude-opus-4-5") + or model.startswith("claude-opus-4-6") ): values["put_inner_thoughts_in_kwargs"] = False @@ -346,6 +359,7 @@ class LLMConfig(BaseModel): thinking=AnthropicThinking(type=thinking_type, budget_tokens=self.max_reasoning_tokens or 1024), verbosity=self.verbosity, strict=self.strict, + effort=self.effort, ) elif 
self.model_endpoint_type == "google_ai": return GoogleAIModelSettings( @@ -374,9 +388,13 @@ class LLMConfig(BaseModel): temperature=self.temperature, ) elif self.model_endpoint_type == "zai": + from letta.schemas.model import ZAIThinking + + thinking_type = "enabled" if self.enable_reasoner else "disabled" return ZAIModelSettings( max_output_tokens=self.max_tokens or 4096, temperature=self.temperature, + thinking=ZAIThinking(type=thinking_type, clear_thinking=False), ) elif self.model_endpoint_type == "groq": return GroqModelSettings( @@ -437,6 +455,7 @@ class LLMConfig(BaseModel): or config.model.startswith("claude-3-7-sonnet") or config.model.startswith("claude-haiku-4-5") or config.model.startswith("claude-opus-4-5") + or config.model.startswith("claude-opus-4-6") ) @classmethod @@ -451,6 +470,48 @@ class LLMConfig(BaseModel): config.model.startswith("gemini-2.5-flash") or config.model.startswith("gemini-2.5-pro") ) + @classmethod + def is_zai_reasoning_model(cls, config: "LLMConfig") -> bool: + return config.model_endpoint_type == "zai" and ( + config.model.startswith("glm-4.5") + or config.model.startswith("glm-4.6") + or config.model.startswith("glm-4.7") + or config.model.startswith("glm-5") + ) + + @classmethod + def is_openrouter_reasoning_model(cls, config: "LLMConfig") -> bool: + """Check if this is an OpenRouter model that supports reasoning. 
+ + OpenRouter model names include provider prefix, e.g.: + - anthropic/claude-sonnet-4 + - openai/o3-mini + - moonshotai/kimi-k2-thinking + - deepseek/deepseek-r1 + """ + if config.model_endpoint_type != "openrouter": + return False + model = config.model.lower() + # OpenAI reasoning models + if "/o1" in model or "/o3" in model or "/o4" in model or "/gpt-5" in model: + return True + # Anthropic Claude reasoning models + if "claude-3-7-sonnet" in model or "claude-sonnet-4" in model or "claude-opus-4" in model or "claude-haiku-4" in model: + return True + # Google Gemini reasoning models + if "gemini" in model: + return True + # ZAI GLM reasoning models + if "glm-4.5" in model or "glm-4.6" in model or "glm-4.7" in model or "glm-5" in model: + return True + # DeepSeek reasoning models + if "deepseek-r1" in model or "deepseek-reasoner" in model: + return True + # Moonshot Kimi reasoning models + if "kimi" in model: + return True + return False + @classmethod def supports_verbosity(cls, config: "LLMConfig") -> bool: """Check if the model supports verbosity control.""" @@ -500,11 +561,27 @@ class LLMConfig(BaseModel): config.put_inner_thoughts_in_kwargs = False if config.enable_reasoner and config.max_reasoning_tokens == 0: config.max_reasoning_tokens = 1024 - # Set default effort level for Claude Opus 4.5 - if config.model.startswith("claude-opus-4-5") and config.effort is None: + # Set default effort level for Claude Opus 4.5 and Opus 4.6 + if ( + config.model.startswith("claude-opus-4-5") + or config.model.startswith("claude-opus-4-6") + or config.model.startswith("claude-sonnet-4-6") + ) and config.effort is None: config.effort = "medium" return config + # ZAI GLM-4.5+ models: toggle honored (similar to Anthropic) + if cls.is_zai_reasoning_model(config): + config.enable_reasoner = bool(reasoning) + config.put_inner_thoughts_in_kwargs = False + return config + + # OpenRouter reasoning models: toggle honored + if cls.is_openrouter_reasoning_model(config): + 
config.enable_reasoner = bool(reasoning) + config.put_inner_thoughts_in_kwargs = False + return config + # Google Gemini 2.5 Pro and Gemini 3: not possible to disable if config.model.startswith("gemini-2.5-pro") or config.model.startswith("gemini-3"): config.put_inner_thoughts_in_kwargs = False @@ -557,14 +634,22 @@ class LLMConfig(BaseModel): config.put_inner_thoughts_in_kwargs = False if config.max_reasoning_tokens == 0: config.max_reasoning_tokens = 1024 - # Set default effort level for Claude Opus 4.5 - if config.model.startswith("claude-opus-4-5") and config.effort is None: + # Set default effort level for Claude Opus 4.5 and Opus 4.6 + if ( + config.model.startswith("claude-opus-4-5") + or config.model.startswith("claude-opus-4-6") + or config.model.startswith("claude-sonnet-4-6") + ) and config.effort is None: config.effort = "medium" elif cls.is_google_vertex_reasoning_model(config) or cls.is_google_ai_reasoning_model(config): # Handle as non-reasoner until we support summary config.put_inner_thoughts_in_kwargs = True if config.max_reasoning_tokens == 0: config.max_reasoning_tokens = 1024 + elif cls.is_zai_reasoning_model(config): + config.put_inner_thoughts_in_kwargs = False + elif cls.is_openrouter_reasoning_model(config): + config.put_inner_thoughts_in_kwargs = False elif cls.is_openai_reasoning_model(config): config.put_inner_thoughts_in_kwargs = False if config.reasoning_effort is None: diff --git a/letta/schemas/llm_config_overrides.py b/letta/schemas/llm_config_overrides.py index 407c73a2..6978938d 100644 --- a/letta/schemas/llm_config_overrides.py +++ b/letta/schemas/llm_config_overrides.py @@ -3,6 +3,7 @@ from typing import Dict LLM_HANDLE_OVERRIDES: Dict[str, Dict[str, str]] = { "anthropic": { "claude-3-5-haiku-20241022": "claude-3-5-haiku", + "claude-haiku-4-5-20251001": "claude-haiku-4-5", "claude-3-5-sonnet-20241022": "claude-3-5-sonnet", "claude-3-opus-20240229": "claude-3-opus", }, diff --git a/letta/schemas/llm_trace.py 
b/letta/schemas/llm_trace.py new file mode 100644 index 00000000..13cbb806 --- /dev/null +++ b/letta/schemas/llm_trace.py @@ -0,0 +1,166 @@ +"""Schema for LLM request/response traces stored in ClickHouse for analytics.""" + +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from pydantic import Field + +from letta.helpers.datetime_helpers import get_utc_time +from letta.schemas.letta_base import LettaBase + + +class LLMTrace(LettaBase): + """ + LLM request/response trace for ClickHouse analytics. + + Stores LLM request/response payloads with denormalized columns for + fast cost analytics queries (token usage by org/agent/model). + + Attributes: + id (str): Unique trace identifier (UUID). + organization_id (str): The organization this trace belongs to. + project_id (str): The project this trace belongs to. + agent_id (str): ID of the agent that made the request. + run_id (str): ID of the run this trace is associated with. + step_id (str): ID of the step that generated this trace. + trace_id (str): OTEL trace ID for correlation. + + call_type (str): Type of LLM call ('agent_step', 'summarization', 'embedding'). + provider (str): LLM provider name ('openai', 'anthropic', etc.). + model (str): Model name/identifier used. + + request_size_bytes (int): Size of request_json in bytes. + response_size_bytes (int): Size of response_json in bytes. + prompt_tokens (int): Number of prompt tokens used. + completion_tokens (int): Number of completion tokens generated. + total_tokens (int): Total tokens (prompt + completion). + latency_ms (int): Request latency in milliseconds. + + is_error (bool): Whether the request resulted in an error. + error_type (str): Exception class name if error occurred. + error_message (str): Error message if error occurred. + + request_json (str): Full request payload as JSON string. + response_json (str): Full response payload as JSON string. 
+ + created_at (datetime): Timestamp when the trace was created. + """ + + __id_prefix__ = "llm_trace" + + # Primary identifier (UUID portion of ProviderTrace.id, prefix stripped for ClickHouse) + id: str = Field(..., description="Trace UUID (strip 'provider_trace-' prefix to correlate)") + + # Context identifiers + organization_id: str = Field(..., description="Organization this trace belongs to") + project_id: Optional[str] = Field(default=None, description="Project this trace belongs to") + agent_id: Optional[str] = Field(default=None, description="Agent that made the request") + agent_tags: list[str] = Field(default_factory=list, description="Tags associated with the agent") + run_id: Optional[str] = Field(default=None, description="Run this trace is associated with") + step_id: Optional[str] = Field(default=None, description="Step that generated this trace") + trace_id: Optional[str] = Field(default=None, description="OTEL trace ID for correlation") + + # Request metadata (queryable) + call_type: str = Field(..., description="Type of LLM call: 'agent_step', 'summarization', 'embedding'") + provider: str = Field(..., description="LLM provider: 'openai', 'anthropic', 'google_ai', etc.") + model: str = Field(..., description="Model name/identifier") + is_byok: bool = Field(default=False, description="Whether this request used BYOK (Bring Your Own Key)") + + # Size metrics + request_size_bytes: int = Field(default=0, description="Size of request_json in bytes") + response_size_bytes: int = Field(default=0, description="Size of response_json in bytes") + + # Token usage + prompt_tokens: int = Field(default=0, description="Number of prompt tokens") + completion_tokens: int = Field(default=0, description="Number of completion tokens") + total_tokens: int = Field(default=0, description="Total tokens (prompt + completion)") + + # Cache and reasoning tokens (from LettaUsageStatistics) + cached_input_tokens: Optional[int] = Field(default=None, description="Number of 
input tokens served from cache") + cache_write_tokens: Optional[int] = Field(default=None, description="Number of tokens written to cache (Anthropic)") + reasoning_tokens: Optional[int] = Field(default=None, description="Number of reasoning/thinking tokens generated") + + # Latency + latency_ms: int = Field(default=0, description="Request latency in milliseconds") + + # Error tracking + is_error: bool = Field(default=False, description="Whether the request resulted in an error") + error_type: Optional[str] = Field(default=None, description="Exception class name if error") + error_message: Optional[str] = Field(default=None, description="Error message if error") + + # Raw payloads (JSON strings) + request_json: str = Field(..., description="Full request payload as JSON string") + response_json: str = Field(..., description="Full response payload as JSON string") + llm_config_json: str = Field(default="", description="LLM config as JSON string") + + # Timestamp + created_at: datetime = Field(default_factory=get_utc_time, description="When the trace was created") + + def to_clickhouse_row(self) -> tuple: + """Convert to a tuple for ClickHouse insertion.""" + return ( + self.id, + self.organization_id, + self.project_id or "", + self.agent_id or "", + self.agent_tags, + self.run_id or "", + self.step_id or "", + self.trace_id or "", + self.call_type, + self.provider, + self.model, + 1 if self.is_byok else 0, + self.request_size_bytes, + self.response_size_bytes, + self.prompt_tokens, + self.completion_tokens, + self.total_tokens, + self.cached_input_tokens, + self.cache_write_tokens, + self.reasoning_tokens, + self.latency_ms, + 1 if self.is_error else 0, + self.error_type or "", + self.error_message or "", + self.request_json, + self.response_json, + self.llm_config_json, + self.created_at, + ) + + @classmethod + def clickhouse_columns(cls) -> list[str]: + """Return column names for ClickHouse insertion.""" + return [ + "id", + "organization_id", + "project_id", + 
"agent_id", + "agent_tags", + "run_id", + "step_id", + "trace_id", + "call_type", + "provider", + "model", + "is_byok", + "request_size_bytes", + "response_size_bytes", + "prompt_tokens", + "completion_tokens", + "total_tokens", + "cached_input_tokens", + "cache_write_tokens", + "reasoning_tokens", + "latency_ms", + "is_error", + "error_type", + "error_message", + "request_json", + "response_json", + "llm_config_json", + "created_at", + ] diff --git a/letta/schemas/mcp.py b/letta/schemas/mcp.py index 06190ed1..614f2df9 100644 --- a/letta/schemas/mcp.py +++ b/letta/schemas/mcp.py @@ -20,7 +20,6 @@ from letta.orm.mcp_oauth import OAuthSessionStatus from letta.schemas.enums import PrimitiveType from letta.schemas.letta_base import LettaBase from letta.schemas.secret import Secret -from letta.settings import settings class BaseMCPServer(LettaBase): diff --git a/letta/schemas/mcp_server.py b/letta/schemas/mcp_server.py index a671467c..7ec807e6 100644 --- a/letta/schemas/mcp_server.py +++ b/letta/schemas/mcp_server.py @@ -1,4 +1,3 @@ -import json from datetime import datetime from typing import Annotated, Any, Dict, List, Literal, Optional, Union from urllib.parse import urlparse @@ -6,12 +5,8 @@ from urllib.parse import urlparse from pydantic import Field, field_validator from letta.functions.mcp_client.types import ( - MCP_AUTH_HEADER_AUTHORIZATION, MCP_AUTH_TOKEN_BEARER_PREFIX, MCPServerType, - SSEServerConfig, - StdioServerConfig, - StreamableHTTPServerConfig, ) from letta.orm.mcp_oauth import OAuthSessionStatus from letta.schemas.enums import PrimitiveType diff --git a/letta/schemas/memory.py b/letta/schemas/memory.py index 4a9e64e4..3c17f557 100644 --- a/letta/schemas/memory.py +++ b/letta/schemas/memory.py @@ -2,7 +2,7 @@ import asyncio import logging from datetime import datetime from io import StringIO -from typing import TYPE_CHECKING, List, Optional, Union +from typing import List, Optional, Union from letta.log import get_logger @@ -43,6 +43,17 @@ class 
ContextWindowOverview(BaseModel): num_tokens_core_memory: int = Field(..., description="The number of tokens in the core memory.") core_memory: str = Field(..., description="The content of the core memory.") + num_tokens_memory_filesystem: int = Field( + 0, description="The number of tokens in the memory filesystem section (git-enabled agents only)." + ) + memory_filesystem: Optional[str] = Field(None, description="The content of the memory filesystem section.") + + num_tokens_tool_usage_rules: int = Field(0, description="The number of tokens in the tool usage rules section.") + tool_usage_rules: Optional[str] = Field(None, description="The content of the tool usage rules section.") + + num_tokens_directories: int = Field(0, description="The number of tokens in the directories section (attached sources).") + directories: Optional[str] = Field(None, description="The content of the directories section.") + num_tokens_summary_memory: int = Field(..., description="The number of tokens in the summary memory.") summary_memory: Optional[str] = Field(None, description="The content of the summary memory.") @@ -61,6 +72,7 @@ class Memory(BaseModel, validate_assignment=True): """ agent_type: Optional[Union["AgentType", str]] = Field(None, description="Agent type controlling prompt rendering.") + git_enabled: bool = Field(False, description="Whether this agent uses git-backed memory with structured labels.") blocks: List[Block] = Field(..., description="Memory blocks contained in the agent's in-context memory") file_blocks: List[FileBlock] = Field( default_factory=list, description="Special blocks representing the agent's in-context memory of an attached file" @@ -106,16 +118,36 @@ class Memory(BaseModel, validate_assignment=True): """Deprecated. Async setter that stores the string but does not validate or use it.""" self.prompt_template = prompt_template + def _get_renderable_blocks(self) -> list: + """Return blocks that should be rendered into . 
+ + For git-memory-enabled agents, only system/ blocks are rendered. + For standard agents, all blocks are rendered. + """ + if self.git_enabled: + return [b for b in self.blocks if b.label and b.label.startswith("system/")] + return list(self.blocks) + + def _display_label(self, label: str) -> str: + """Return the XML tag name for a block label. + + For git-memory-enabled agents, strip the 'system/' prefix so + system/human renders as . + """ + if self.git_enabled and label.startswith("system/"): + return label.removeprefix("system/") + return label + @trace_method def _render_memory_blocks_standard(self, s: StringIO): - if len(self.blocks) == 0: - # s.write("") # TODO: consider empty tags + renderable = self._get_renderable_blocks() + if len(renderable) == 0: s.write("") return s.write("\nThe following memory blocks are currently engaged in your core memory unit:\n\n") - for idx, block in enumerate(self.blocks): - label = block.label or "block" + for idx, block in enumerate(renderable): + label = self._display_label(block.label or "block") value = block.value or "" desc = block.description or "" chars_current = len(value) @@ -135,14 +167,15 @@ class Memory(BaseModel, validate_assignment=True): s.write(f"{value}\n") s.write("\n") s.write(f"\n") - if idx != len(self.blocks) - 1: + if idx != len(renderable) - 1: s.write("\n") s.write("\n") def _render_memory_blocks_line_numbered(self, s: StringIO): + renderable = self._get_renderable_blocks() s.write("\nThe following memory blocks are currently engaged in your core memory unit:\n\n") - for idx, block in enumerate(self.blocks): - label = block.label or "block" + for idx, block in enumerate(renderable): + label = self._display_label(block.label or "block") value = block.value or "" desc = block.description or "" limit = block.limit if block.limit is not None else 0 @@ -164,10 +197,132 @@ class Memory(BaseModel, validate_assignment=True): s.write(f"{i}→ {line}\n") s.write("\n") s.write(f"\n") - if idx != 
len(self.blocks) - 1: + if idx != len(renderable) - 1: s.write("\n") s.write("\n") + def _render_memory_blocks_git(self, s: StringIO): + """Render memory blocks as individual file tags with YAML frontmatter. + + Each block is rendered as ---frontmatter---value, + matching the format stored in the git repo. Labels without a 'system/' + prefix get one added automatically. + """ + renderable = self._get_renderable_blocks() + if not renderable: + return + + for idx, block in enumerate(renderable): + label = block.label or "block" + # Ensure system/ prefix + if not label.startswith("system/"): + label = f"system/{label}" + tag = f"{label}.md" + value = block.value or "" + + s.write(f"\n\n<{tag}>\n") + + # Build frontmatter (same fields as serialize_block) + front_lines = [] + if block.description: + front_lines.append(f"description: {block.description}") + if block.limit is not None: + front_lines.append(f"limit: {block.limit}") + if getattr(block, "read_only", False): + front_lines.append("read_only: true") + + if front_lines: + s.write("---\n") + s.write("\n".join(front_lines)) + s.write("\n---\n") + + s.write(f"{value}\n") + s.write(f"") + + def _render_memory_filesystem(self, s: StringIO): + """Render a filesystem tree view of all memory blocks. + + Only rendered for git-memory-enabled agents. Uses box-drawing + characters (├──, └──, │) like the Unix `tree` command, while keeping + deterministic ordering (directories first, then files, alphabetically). + """ + if not self.blocks: + return + + # Build tree structure from block labels. + # + # IMPORTANT: labels are path-like (e.g. "system/human"). In real filesystems a + # path component cannot be both a directory and a file, but our block namespace + # can contain collisions like: + # - "system" (a block) + # - "system/human" (a block under a virtual "system/" directory) + # + # When we detect a collision, we convert the would-be directory node into a + # dict and store the colliding leaf block under LEAF_KEY. 
+ LEAF_KEY = "__block__" + + tree: dict = {} + for block in self.blocks: + label = block.label or "block" + parts = [p for p in label.split("/") if p] + if not parts: + parts = ["block"] + + node: dict = tree + for part in parts[:-1]: + existing = node.get(part) + if existing is None: + node[part] = {} + elif not isinstance(existing, dict): + # Collision: leaf at `part` and now we need it to be a directory. + node[part] = {LEAF_KEY: existing} + node = node[part] # type: ignore[assignment] + + leaf = parts[-1] + existing_leaf = node.get(leaf) + if existing_leaf is None: + node[leaf] = block + elif isinstance(existing_leaf, dict): + # Collision: directory at `leaf` already exists; attach the leaf block. + existing_leaf[LEAF_KEY] = block + else: + # Duplicate leaf label; last writer wins. + node[leaf] = block + + s.write("\n\n\n") + + def _render_tree(node: dict, prefix: str = ""): + # Sort: directories first, then files. If a node is both a directory and a + # leaf (LEAF_KEY present), show both / and .md. + dirs = [] + files = [] + for name, val in node.items(): + if name == LEAF_KEY: + continue + if isinstance(val, dict): + dirs.append(name) + if LEAF_KEY in val: + files.append(name) + else: + files.append(name) + + dirs = sorted(dirs) + files = sorted(files) + entries = [(d, True) for d in dirs] + [(f, False) for f in files] + + for i, (name, is_dir) in enumerate(entries): + is_last = i == len(entries) - 1 + connector = "└── " if is_last else "├── " + if is_dir: + s.write(f"{prefix}{connector}{name}/\n") + extension = " " if is_last else "│ " + _render_tree(node[name], prefix + extension) + else: + s.write(f"{prefix}{connector}{name}.md\n") + + _render_tree(tree) + s.write("") + def _render_directories_common(self, s: StringIO, sources, max_files_open): s.write("\n\n\n") if max_files_open is not None: @@ -286,7 +441,11 @@ class Memory(BaseModel, validate_assignment=True): # Memory blocks (not for react/workflow). Always include wrapper for preview/tests. 
if not is_react: - if is_line_numbered: + if self.git_enabled: + # Git-enabled: filesystem tree + file-style block rendering + self._render_memory_filesystem(s) + self._render_memory_blocks_git(s) + elif is_line_numbered: self._render_memory_blocks_line_numbered(s) else: self._render_memory_blocks_standard(s) @@ -376,7 +535,7 @@ class BasicBlockMemory(Memory): """ super().__init__(blocks=blocks) - def core_memory_append(agent_state: "AgentState", label: str, content: str) -> Optional[str]: # type: ignore + def core_memory_append(agent_state: "AgentState", label: str, content: str) -> Optional[str]: # type: ignore # noqa: F821 """ Append to the contents of core memory. @@ -392,7 +551,7 @@ class BasicBlockMemory(Memory): agent_state.memory.update_block_value(label=label, value=new_value) return None - def core_memory_replace(agent_state: "AgentState", label: str, old_content: str, new_content: str) -> Optional[str]: # type: ignore + def core_memory_replace(agent_state: "AgentState", label: str, old_content: str, new_content: str) -> Optional[str]: # type: ignore # noqa: F821 """ Replace the contents of core memory. To delete memories, use an empty string for new_content. diff --git a/letta/schemas/memory_repo.py b/letta/schemas/memory_repo.py new file mode 100644 index 00000000..c306a767 --- /dev/null +++ b/letta/schemas/memory_repo.py @@ -0,0 +1,44 @@ +"""Pydantic schemas for git-based memory repositories. + +These are used internally by the git-backed block/memory repository services. + +Note: REST "sync" request/response schemas were removed when we switched to +clients interacting with repositories directly via git smart HTTP. 
+""" + +from __future__ import annotations + +from datetime import datetime +from typing import List, Optional + +from pydantic import Field + +from letta.schemas.letta_base import LettaBase + + +class MemoryCommit(LettaBase): + """Represents a commit in the memory repository.""" + + __id_prefix__ = "memcommit" + + sha: str = Field(..., description="Commit SHA (40-char hex).") + parent_sha: Optional[str] = Field(None, description="Parent commit SHA.") + message: str = Field(..., description="Commit message.") + + author_type: str = Field(..., description="Author type: agent, user, system.") + author_id: str = Field(..., description="Author ID.") + author_name: Optional[str] = Field(None, description="Human-readable author name.") + + timestamp: datetime = Field(..., description="Commit timestamp.") + + files_changed: List[str] = Field(default_factory=list, description="List of changed file paths.") + additions: int = Field(default=0, description="Number of lines/chars added.") + deletions: int = Field(default=0, description="Number of lines/chars deleted.") + + +class FileChange(LettaBase): + """Represents a file change for committing.""" + + path: str = Field(..., description="File path within repository.") + content: Optional[str] = Field(None, description="New file content (None for delete).") + change_type: str = Field(default="modify", description="Change type: add, modify, delete.") diff --git a/letta/schemas/message.py b/letta/schemas/message.py index 22ed104a..6368db2f 100644 --- a/letta/schemas/message.py +++ b/letta/schemas/message.py @@ -11,10 +11,9 @@ import uuid from collections import OrderedDict from datetime import datetime, timezone from enum import Enum -from typing import Annotated, Any, Dict, List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from 
openai.types.responses import ResponseReasoningItem from pydantic import BaseModel, Field, field_validator, model_validator from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, REQUEST_HEARTBEAT_PARAM, TOOL_CALL_ID_MAX_LEN @@ -37,6 +36,7 @@ from letta.schemas.letta_message import ( MessageType, ReasoningMessage, ReasoningMessageListResult, + SummaryMessage, SystemMessage, SystemMessageListResult, ToolCall, @@ -45,6 +45,7 @@ from letta.schemas.letta_message import ( ToolReturnMessage, UserMessage, UserMessageListResult, + extract_compaction_stats_from_packed_json, ) from letta.schemas.letta_message_content import ( ImageContent, @@ -61,7 +62,7 @@ from letta.schemas.letta_message_content import ( get_letta_message_content_union_str_json_schema, ) from letta.system import unpack_message -from letta.utils import parse_json, validate_function_response +from letta.utils import parse_json, sanitize_tool_call_id, validate_function_response def truncate_tool_return(content: Optional[str], limit: Optional[int]) -> Optional[str]: @@ -125,6 +126,7 @@ def add_inner_thoughts_to_tool_call( class MessageCreateType(str, Enum): message = "message" approval = "approval" + tool_return = "tool_return" class MessageCreateBase(BaseModel): @@ -185,7 +187,24 @@ class ApprovalCreate(MessageCreateBase): return self -MessageCreateUnion = Union[MessageCreate, ApprovalCreate] +class ToolReturnCreate(MessageCreateBase): + """Submit tool return(s) from client-side tool execution. + + This is the preferred way to send tool results back to the agent after + client-side tool execution. It is equivalent to sending an ApprovalCreate + with tool return approvals, but provides a cleaner API for the common case. + """ + + type: Literal[MessageCreateType.tool_return] = Field( + default=MessageCreateType.tool_return, description="The message type to be created." 
+ ) + tool_returns: List[LettaToolReturn] = Field( + ..., + description="List of tool returns from client-side execution", + ) + + +MessageCreateUnion = Union[MessageCreate, ApprovalCreate, ToolReturnCreate] class MessageUpdate(BaseModel): @@ -290,7 +309,7 @@ class Message(BaseMessage): @field_validator("role") @classmethod def validate_role(cls, v: str) -> str: - roles = ["system", "assistant", "user", "tool", "approval"] + roles = ["system", "assistant", "user", "tool", "approval", "summary"] assert v in roles, f"Role must be one of {roles}" return v @@ -320,6 +339,7 @@ class Message(BaseMessage): reverse: bool = True, include_err: Optional[bool] = None, text_is_assistant_message: bool = False, + convert_summary_to_user: bool = True, ) -> List[LettaMessage]: if use_assistant_message: message_ids_to_remove = [] @@ -352,6 +372,7 @@ class Message(BaseMessage): reverse=reverse, include_err=include_err, text_is_assistant_message=text_is_assistant_message, + convert_summary_to_user=convert_summary_to_user, ) ] @@ -365,6 +386,7 @@ class Message(BaseMessage): reverse: bool = True, include_err: Optional[bool] = None, text_is_assistant_message: bool = False, + convert_summary_to_user: bool = True, ) -> List[LettaMessageSearchResult]: """Convert MessageSearchResult objects into LettaMessageSearchResult objects. 
@@ -385,6 +407,7 @@ class Message(BaseMessage): reverse=reverse, include_err=include_err, text_is_assistant_message=text_is_assistant_message, + convert_summary_to_user=convert_summary_to_user, ) for lm in letta_messages: @@ -445,8 +468,14 @@ class Message(BaseMessage): reverse: bool = True, include_err: Optional[bool] = None, text_is_assistant_message: bool = False, + convert_summary_to_user: bool = True, ) -> List[LettaMessage]: - """Convert message object (in DB format) to the style used by the original Letta API""" + """Convert message object (in DB format) to the style used by the original Letta API + + Args: + convert_summary_to_user: If True (default), summary messages are returned as UserMessage + for backward compatibility. If False, return as SummaryMessage. + """ messages = [] if self.role == MessageRole.assistant: @@ -468,6 +497,8 @@ class Message(BaseMessage): messages.append(self._convert_user_message()) elif self.role == MessageRole.system: messages.append(self._convert_system_message()) + elif self.role == MessageRole.summary: + messages.append(self._convert_summary_message(as_user_message=convert_summary_to_user)) elif self.role == MessageRole.approval: if self.content: messages.extend(self._convert_reasoning_messages(text_is_assistant_message=text_is_assistant_message)) @@ -734,7 +765,12 @@ class Message(BaseMessage): func_args = parse_json(tool_call.function.arguments) message_string = validate_function_response(func_args[assistant_message_tool_kwarg], 0, truncate=False) except KeyError: - raise ValueError(f"Function call {tool_call.function.name} missing {assistant_message_tool_kwarg} argument") + logger.error( + "Function call %s missing %s argument; skipping assistant message conversion", + tool_call.function.name, + assistant_message_tool_kwarg, + ) + continue # Ensure content is a string (validate_function_response can return dict) if isinstance(message_string, dict): @@ -1036,6 +1072,49 @@ class Message(BaseMessage): run_id=self.run_id, ) + 
def _convert_summary_message(self, as_user_message: bool = True) -> Union[SummaryMessage, UserMessage]: + """Convert summary role message to SummaryMessage or UserMessage. + + Args: + as_user_message: If True, return UserMessage for backward compatibility with + clients that don't support SummaryMessage. If False, return SummaryMessage. + """ + if self.content and len(self.content) == 1 and isinstance(self.content[0], TextContent): + text_content = self.content[0].text + else: + raise ValueError(f"Invalid summary message (no text object on message): {self.content}") + + # Unpack the summary from the packed JSON format + # The packed format is: {"type": "system_alert", "message": "...", "time": "...", "compaction_stats": {...}} + summary = unpack_message(text_content) + + # Extract compaction_stats from the packed JSON using shared helper + compaction_stats = extract_compaction_stats_from_packed_json(text_content) + + if as_user_message: + # Return as UserMessage for backward compatibility + return UserMessage( + id=self.id, + date=self.created_at, + content=summary, + name=self.name, + otid=self.otid, + sender_id=self.sender_id, + step_id=self.step_id, + is_err=self.is_err, + run_id=self.run_id, + ) + else: + return SummaryMessage( + id=self.id, + date=self.created_at, + summary=summary, + otid=self.otid, + step_id=self.step_id, + run_id=self.run_id, + compaction_stats=compaction_stats, + ) + @staticmethod def dict_to_message( agent_id: str, @@ -1071,7 +1150,7 @@ class Message(BaseMessage): tool_returns = [ToolReturn(**tr) for tr in openai_message_dict["tool_returns"]] # TODO(caren) bad assumption here that "reasoning_content" always comes before "redacted_reasoning_content" - if "reasoning_content" in openai_message_dict and openai_message_dict["reasoning_content"]: + if openai_message_dict.get("reasoning_content"): content.append( ReasoningContent( reasoning=openai_message_dict["reasoning_content"], @@ -1083,13 +1162,13 @@ class Message(BaseMessage): ), ), ) - if 
"redacted_reasoning_content" in openai_message_dict and openai_message_dict["redacted_reasoning_content"]: + if openai_message_dict.get("redacted_reasoning_content"): content.append( RedactedReasoningContent( data=str(openai_message_dict["redacted_reasoning_content"]), ), ) - if "omitted_reasoning_content" in openai_message_dict and openai_message_dict["omitted_reasoning_content"]: + if openai_message_dict.get("omitted_reasoning_content"): content.append( OmittedReasoningContent(), ) @@ -1297,6 +1376,14 @@ class Message(BaseMessage): "role": self.role, } + elif self.role == "summary": + # Summary messages are converted to user messages (same as current system_alert behavior) + assert text_content is not None, vars(self) + openai_message = { + "content": text_content, + "role": "user", + } + elif self.role == "assistant" or self.role == "approval": try: assert self.tool_calls is not None or text_content is not None, vars(self) @@ -1448,11 +1535,17 @@ class Message(BaseMessage): message_dicts = [] if self.role == "system": - assert len(self.content) == 1 and isinstance(self.content[0], TextContent), vars(self) + text_parts = [c.text for c in (self.content or []) if isinstance(c, TextContent)] + if not text_parts: + logger.warning( + f"System message {self.id} has no text content, skipping: roles={[type(c).__name__ for c in (self.content or [])]}" + ) + return message_dicts + system_text = "\n\n".join(text_parts) message_dicts.append( { "role": "developer", - "content": self.content[0].text, + "content": system_text, } ) @@ -1474,6 +1567,16 @@ class Message(BaseMessage): message_dicts.append(user_dict) + elif self.role == "summary": + # Summary messages are converted to user messages (same as current system_alert behavior) + assert self.content and len(self.content) == 1 and isinstance(self.content[0], TextContent), vars(self) + message_dicts.append( + { + "role": "user", + "content": self.content[0].text, + } + ) + elif self.role == "assistant" or self.role == 
"approval": # Validate that message has content OpenAI Responses API can process if self.tool_calls is None and (self.content is None or len(self.content) == 0): @@ -1753,10 +1856,20 @@ class Message(BaseMessage): if self.role == "system": # NOTE: this is not for system instructions, but instead system "events" - assert text_content is not None, vars(self) + system_text = text_content + if system_text is None: + text_parts = [c.text for c in (self.content or []) if isinstance(c, TextContent)] + if not text_parts: + from letta.log import get_logger as _get_logger + + _get_logger(__name__).warning( + f"System message {self.id} has no text content, skipping: roles={[type(c).__name__ for c in (self.content or [])]}" + ) + return None + system_text = "\n\n".join(text_parts) # Two options here, we would use system.package_system_message, # or use a more Anthropic-specific packaging ie xml tags - user_system_event = add_xml_tag(string=f"SYSTEM ALERT: {text_content}", xml_tag="event") + user_system_event = add_xml_tag(string=f"SYSTEM ALERT: {system_text}", xml_tag="event") anthropic_message = { "content": user_system_event, "role": "user", @@ -1793,6 +1906,14 @@ class Message(BaseMessage): "role": self.role, } + elif self.role == "summary": + # Summary messages are converted to user messages (same as current system_alert behavior) + assert text_content is not None, vars(self) + anthropic_message = { + "content": text_content, + "role": "user", + } + elif self.role == "assistant" or self.role == "approval": # Validate that message has content Anthropic API can process if self.tool_calls is None and (self.content is None or len(self.content) == 0): @@ -1889,7 +2010,7 @@ class Message(BaseMessage): content.append( { "type": "tool_use", - "id": tool_call.id, + "id": sanitize_tool_call_id(tool_call.id), "name": tool_call.function.name, "input": tool_call_input, } @@ -1933,7 +2054,7 @@ class Message(BaseMessage): content.append( { "type": "tool_result", - "tool_use_id": 
resolved_tool_call_id, + "tool_use_id": sanitize_tool_call_id(resolved_tool_call_id), "content": tool_result_content, } ) @@ -2053,6 +2174,14 @@ class Message(BaseMessage): "parts": content_parts, } + elif self.role == "summary": + # Summary messages are converted to user messages (same as current system_alert behavior) + assert text_content is not None, vars(self) + google_ai_message = { + "role": "user", + "parts": [{"text": text_content}], + } + elif self.role == "assistant" or self.role == "approval": # Validate that message has content Google API can process if self.tool_calls is None and text_content is None and len(self.content) <= 1: @@ -2108,7 +2237,7 @@ class Message(BaseMessage): try: # NOTE: Google AI wants actual JSON objects, not strings function_args = parse_json(function_args) - except: + except Exception: raise UserWarning(f"Failed to parse JSON function args: {function_args}") function_args = {"args": function_args} @@ -2198,7 +2327,7 @@ class Message(BaseMessage): try: function_response = parse_json(text_content) - except: + except Exception: function_response = {"function_response": text_content} parts.append( @@ -2231,7 +2360,7 @@ class Message(BaseMessage): # NOTE: Google AI API wants the function response as JSON only, no string try: function_response = parse_json(legacy_content) - except: + except Exception: function_response = {"function_response": legacy_content} google_ai_message = { @@ -2290,6 +2419,10 @@ class Message(BaseMessage): return self.role == "approval" and self.tool_calls is None and self.approve is not None def is_summarization_message(self) -> bool: + # First-class summary role (new format) + if self.role == "summary": + return True + # Legacy format: user message with system_alert content return ( self.role == "user" and self.content is not None diff --git a/letta/schemas/model.py b/letta/schemas/model.py index f5d5fdac..fa1c14cb 100644 --- a/letta/schemas/model.py +++ b/letta/schemas/model.py @@ -212,7 +212,7 @@ class 
ModelSettings(BaseModel): # model: str = Field(..., description="The name of the model.") max_output_tokens: int = Field(4096, description="The maximum number of tokens the model can generate.") - parallel_tool_calls: bool = Field(False, description="Whether to enable parallel tool calling.") + parallel_tool_calls: bool = Field(True, description="Whether to enable parallel tool calling.") class OpenAIReasoning(BaseModel): @@ -374,12 +374,22 @@ class XAIModelSettings(ModelSettings): } +class ZAIThinking(BaseModel): + """Thinking configuration for ZAI GLM-4.5+ models.""" + + type: Literal["enabled", "disabled"] = Field("enabled", description="Whether thinking is enabled or disabled.") + clear_thinking: bool = Field(False, description="If False, preserved thinking is used (recommended for agents).") + + class ZAIModelSettings(ModelSettings): """Z.ai (ZhipuAI) model configuration (OpenAI-compatible).""" provider_type: Literal[ProviderType.zai] = Field(ProviderType.zai, description="The type of the provider.") temperature: float = Field(0.7, description="The temperature of the model.") response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.") + thinking: ZAIThinking = Field( + ZAIThinking(type="enabled", clear_thinking=False), description="The thinking configuration for GLM-4.5+ models." 
+ ) def _to_legacy_config_params(self) -> dict: return { @@ -388,6 +398,7 @@ class ZAIModelSettings(ModelSettings): "response_format": self.response_format, "parallel_tool_calls": self.parallel_tool_calls, "strict": False, # ZAI does not support strict mode + "extended_thinking": self.thinking.type == "enabled", } diff --git a/letta/schemas/openai/chat_completion_request.py b/letta/schemas/openai/chat_completion_request.py index c0939257..8eb2ce9e 100644 --- a/letta/schemas/openai/chat_completion_request.py +++ b/letta/schemas/openai/chat_completion_request.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Literal, Optional, Union -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, field_validator class SystemMessage(BaseModel): @@ -143,6 +143,7 @@ class ChatCompletionRequest(BaseModel): temperature: Optional[float] = 1 top_p: Optional[float] = 1 user: Optional[str] = None # unique ID of the end-user (for monitoring) + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] = None parallel_tool_calls: Optional[bool] = None instructions: Optional[str] = None verbosity: Optional[Literal["low", "medium", "high"]] = None # For verbosity control in GPT-5 models diff --git a/letta/schemas/openai/responses_request.py b/letta/schemas/openai/responses_request.py index aeeefa23..112fcb91 100644 --- a/letta/schemas/openai/responses_request.py +++ b/letta/schemas/openai/responses_request.py @@ -29,7 +29,7 @@ class ResponsesRequest(BaseModel): parallel_tool_calls: Optional[bool] = Field(default=NOT_GIVEN) previous_response_id: Optional[str] = Field(default=NOT_GIVEN) prompt: Optional[ResponsePromptParam] = Field(default=NOT_GIVEN) - prompt_cache_key: Optional[str] = Field(default=NOT_GIVEN) + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] = Field(default=NOT_GIVEN) reasoning: Optional[Reasoning] = Field(default=NOT_GIVEN) safety_identifier: Optional[str] = Field(default=NOT_GIVEN) service_tier: 
Optional[Literal["auto", "default", "flex", "scale", "priority"]] = Field(default=NOT_GIVEN) diff --git a/letta/schemas/provider_model.py b/letta/schemas/provider_model.py index fd948fd8..0caf889d 100644 --- a/letta/schemas/provider_model.py +++ b/letta/schemas/provider_model.py @@ -1,4 +1,3 @@ -from datetime import datetime from typing import Optional from pydantic import Field diff --git a/letta/schemas/providers/__init__.py b/letta/schemas/providers/__init__.py index 2790ba7e..40e0e333 100644 --- a/letta/schemas/providers/__init__.py +++ b/letta/schemas/providers/__init__.py @@ -24,13 +24,6 @@ from .xai import XAIProvider from .zai import ZAIProvider __all__ = [ - # Base classes - "Provider", - "ProviderBase", - "ProviderCreate", - "ProviderUpdate", - "ProviderCheck", - # Provider implementations "AnthropicProvider", "AzureProvider", "BedrockProvider", @@ -40,16 +33,21 @@ __all__ = [ "GoogleAIProvider", "GoogleVertexProvider", "GroqProvider", - "LettaProvider", "LMStudioOpenAIProvider", + "LettaProvider", "MiniMaxProvider", "MistralProvider", "OllamaProvider", "OpenAIProvider", - "TogetherProvider", - "VLLMProvider", # Replaces ChatCompletions and Completions + "OpenRouterProvider", + "Provider", + "ProviderBase", + "ProviderCheck", + "ProviderCreate", + "ProviderUpdate", "SGLangProvider", + "TogetherProvider", + "VLLMProvider", "XAIProvider", "ZAIProvider", - "OpenRouterProvider", ] diff --git a/letta/schemas/providers/anthropic.py b/letta/schemas/providers/anthropic.py index 2e2faf6c..554519b3 100644 --- a/letta/schemas/providers/anthropic.py +++ b/letta/schemas/providers/anthropic.py @@ -108,6 +108,16 @@ MODEL_LIST = [ "name": "claude-opus-4-5-20251101", "context_window": 200000, }, + ## Opus 4.6 + { + "name": "claude-opus-4-6", + "context_window": 200000, + }, + ## Sonnet 4.6 + { + "name": "claude-sonnet-4-6", + "context_window": 200000, + }, ] @@ -134,7 +144,9 @@ class AnthropicProvider(Provider): def get_default_max_output_tokens(self, model_name: str) -> 
int: """Get the default max output tokens for Anthropic models.""" - if "opus" in model_name: + if "claude-opus-4-6" in model_name or "claude-sonnet-4-6" in model_name: + return 21000 # Opus 4.6 / Sonnet 4.6 supports up to 128k with streaming, use 21k as default + elif "opus" in model_name: return 16384 elif "sonnet" in model_name: return 16384 @@ -169,10 +181,19 @@ class AnthropicProvider(Provider): else: raise ValueError("No API key provided") - models = await anthropic_client.models.list() - models_json = models.model_dump() - assert "data" in models_json, f"Anthropic model query response missing 'data' field: {models_json}" - models_data = models_json["data"] + try: + # Auto-paginate through all pages to ensure we get every model. + # The default page size is 20, and Anthropic now has more models than that. + models_data = [] + async for model in anthropic_client.models.list(): + models_data.append(model.model_dump()) + except AttributeError as e: + if "_set_private_attributes" in str(e): + raise LLMError( + message="Anthropic API returned an unexpected non-JSON response. 
Verify the API key and endpoint.", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) + raise return self._list_llm_models(models_data) @@ -194,7 +215,7 @@ class AnthropicProvider(Provider): logger.warning(f"Couldn't find context window size for model {model['id']}, defaulting to 200,000") model["context_window"] = 200000 - # Optional override: enable 1M context for Sonnet 4/4.5 when flag is set + # Optional override: enable 1M context for Sonnet 4/4.5 or Opus 4.6 when flag is set try: from letta.settings import model_settings @@ -202,6 +223,8 @@ class AnthropicProvider(Provider): model["id"].startswith("claude-sonnet-4") or model["id"].startswith("claude-sonnet-4-5") ): model["context_window"] = 1_000_000 + elif model_settings.anthropic_opus_1m and model["id"].startswith("claude-opus-4-6"): + model["context_window"] = 1_000_000 except Exception: pass diff --git a/letta/schemas/providers/azure.py b/letta/schemas/providers/azure.py index da074420..11fa2452 100644 --- a/letta/schemas/providers/azure.py +++ b/letta/schemas/providers/azure.py @@ -2,11 +2,14 @@ from collections import defaultdict from typing import ClassVar, Literal import httpx -from openai import AsyncAzureOpenAI +from openai import AsyncAzureOpenAI, AuthenticationError, PermissionDeniedError from pydantic import Field, field_validator from letta.constants import DEFAULT_EMBEDDING_CHUNK_SIZE, LLM_MAX_CONTEXT_WINDOW -from letta.errors import ErrorCode, LLMAuthenticationError +from letta.errors import ErrorCode, LLMAuthenticationError, LLMPermissionDeniedError +from letta.log import get_logger + +logger = get_logger(__name__) from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ProviderCategory, ProviderType from letta.schemas.llm_config import LLMConfig @@ -43,6 +46,12 @@ class AzureProvider(Provider): def replace_none_with_default(cls, v): return v if v is not None else cls.LATEST_API_VERSION + @staticmethod + def _is_v1_endpoint(base_url: str) -> bool: + if not 
base_url: + return False + return base_url.rstrip("/").endswith("/openai/v1") + def get_azure_chat_completions_endpoint(self, model: str): return f"{self.base_url}/openai/deployments/{model}/chat/completions?api-version={self.api_version}" @@ -57,14 +66,61 @@ class AzureProvider(Provider): # That's the only api version that works with this deployments endpoint return f"{self.base_url}/openai/deployments?api-version=2023-03-15-preview" + def _get_resource_base_url(self) -> str: + """Derive the Azure resource base URL (e.g. https://project.openai.azure.com) from any endpoint format.""" + url = self.base_url.rstrip("/") + if url.endswith("/openai/v1"): + return url[: -len("/openai/v1")] + return url + + async def _get_deployments(self, api_key: str | None) -> list[dict]: + """Fetch deployments using the legacy 2023-03-15-preview endpoint. + + Works for both v1 and legacy endpoints since it hits the resource base URL. + Returns the raw deployment dicts (each has 'id' = deployment name). + """ + resource_base = self._get_resource_base_url() + url = f"{resource_base}/openai/deployments?api-version=2023-03-15-preview" + + headers = {"Content-Type": "application/json"} + if api_key is not None: + headers["api-key"] = f"{api_key}" + + try: + timeout = httpx.Timeout(15.0, connect=10.0) + async with httpx.AsyncClient(timeout=timeout) as http_client: + response = await http_client.get(url, headers=headers) + response.raise_for_status() + except httpx.TimeoutException as e: + raise RuntimeError(f"Azure API timeout after 15s: {e}") + except httpx.HTTPStatusError as e: + raise RuntimeError(f"Failed to retrieve deployment list: {e}") + + return response.json().get("data", []) + async def azure_openai_get_deployed_model_list(self) -> list: """https://learn.microsoft.com/en-us/rest/api/azureopenai/models/list?view=rest-azureopenai-2023-05-15&tabs=HTTP""" api_key = await self.api_key_enc.get_plaintext_async() if self.api_key_enc else None + + if self._is_v1_endpoint(self.base_url): + 
# The v1 /models endpoint returns base model names (e.g. "gpt-5.2-chat-2025-12-11") + # but inference calls require deployment names (e.g. "gpt-5.2-chat"). + # Query the legacy deployments endpoint to get actual deployment names. + return await self._get_deployments(api_key) + + # Legacy path: use Azure SDK + deployments endpoint client = AsyncAzureOpenAI(api_key=api_key, api_version=self.api_version, azure_endpoint=self.base_url) try: models_list = await client.models.list() + except (AuthenticationError, PermissionDeniedError): + # Re-raise auth/permission errors so they're properly handled upstream + raise + except AttributeError as e: + if "_set_private_attributes" in str(e): + logger.warning(f"Azure endpoint at {self.base_url} returned an unexpected non-JSON response: {e}") + return [] except Exception: return [] @@ -112,6 +168,37 @@ class AzureProvider(Provider): async def list_llm_models_async(self) -> list[LLMConfig]: model_list = await self.azure_openai_get_deployed_model_list() + + if self._is_v1_endpoint(self.base_url): + # v1 path: follow OpenAIProvider pattern with litellm context window lookup + configs = [] + for model in model_list: + model_name = model.get("id") + if not model_name: + continue + + # Use capabilities if present, otherwise accept all (Azure deployments are user-curated) + capabilities = model.get("capabilities") + if capabilities and capabilities.get("chat_completion") is not None: + if not capabilities.get("chat_completion"): + continue + + context_window_size = await self.get_model_context_window_async(model_name) + configs.append( + LLMConfig( + model=model_name, + model_endpoint_type="azure", + model_endpoint=self.base_url, + context_window=context_window_size, + handle=self.get_handle(model_name), + max_tokens=self.get_default_max_output_tokens(model_name), + provider_name=self.name, + provider_category=self.provider_category, + ) + ) + return configs + + # Legacy path # Extract models that support text generation model_options 
= [m for m in model_list if m.get("capabilities").get("chat_completion") == True] @@ -135,6 +222,38 @@ class AzureProvider(Provider): return configs async def list_embedding_models_async(self) -> list[EmbeddingConfig]: + model_list = await self.azure_openai_get_deployed_model_list() + + if self._is_v1_endpoint(self.base_url): + # v1 path: use base URL as endpoint, filter by capabilities or name + configs = [] + for model in model_list: + model_name = model.get("id") + if not model_name: + continue + + # Use capabilities if present, otherwise filter by name + capabilities = model.get("capabilities") + if capabilities and capabilities.get("embeddings") is not None: + if not capabilities.get("embeddings"): + continue + elif "embedding" not in model_name: + continue + + configs.append( + EmbeddingConfig( + embedding_model=model_name, + embedding_endpoint_type="azure", + embedding_endpoint=self.base_url, + embedding_dim=768, + embedding_chunk_size=DEFAULT_EMBEDDING_CHUNK_SIZE, + handle=self.get_handle(model_name, is_embedding=True), + batch_size=1024, + ) + ) + return configs + + # Legacy path def valid_embedding_model(m: dict, require_embedding_in_name: bool = True): valid_name = True if require_embedding_in_name: @@ -142,9 +261,7 @@ class AzureProvider(Provider): return m.get("capabilities").get("embeddings") == True and valid_name - model_list = await self.azure_openai_get_deployed_model_list() # Extract models that support embeddings - model_options = [m for m in model_list if valid_embedding_model(m)] configs = [] @@ -169,6 +286,23 @@ class AzureProvider(Provider): llm_default = LLM_MAX_CONTEXT_WINDOW.get(model_name, 4096) return AZURE_MODEL_TO_CONTEXT_LENGTH.get(model_name, llm_default) + async def get_model_context_window_async(self, model_name: str) -> int | None: + """Get context window size, using litellm specs for v1 endpoints or hardcoded map for legacy.""" + if self._is_v1_endpoint(self.base_url): + from letta.model_specs.litellm_model_specs import 
get_context_window + + # Litellm keys Azure models with an "azure/" prefix + context_window = await get_context_window(f"azure/{model_name}") + if context_window is not None: + return context_window + # Try without prefix as fallback + context_window = await get_context_window(model_name) + if context_window is not None: + return context_window + # Fall back to hardcoded map, then default + return self.get_model_context_window(model_name) + return self.get_model_context_window(model_name) + async def check_api_key(self): api_key = await self.api_key_enc.get_plaintext_async() if self.api_key_enc else None if not api_key: @@ -176,5 +310,8 @@ class AzureProvider(Provider): try: await self.list_llm_models_async() + except (LLMAuthenticationError, LLMPermissionDeniedError): + # Re-raise specific LLM errors as-is + raise except Exception as e: raise LLMAuthenticationError(message=f"Failed to authenticate with Azure: {e}", code=ErrorCode.UNAUTHENTICATED) diff --git a/letta/schemas/providers/base.py b/letta/schemas/providers/base.py index 73e4a239..d527d4d5 100644 --- a/letta/schemas/providers/base.py +++ b/letta/schemas/providers/base.py @@ -90,7 +90,6 @@ class Provider(ProviderBase): def list_llm_models(self) -> list[LLMConfig]: """List available LLM models (deprecated: use list_llm_models_async)""" import asyncio - import warnings logger.warning("list_llm_models is deprecated, use list_llm_models_async instead", stacklevel=2) @@ -115,7 +114,6 @@ class Provider(ProviderBase): def list_embedding_models(self) -> list[EmbeddingConfig]: """List available embedding models (deprecated: use list_embedding_models_async)""" import asyncio - import warnings logger.warning("list_embedding_models is deprecated, use list_embedding_models_async instead", stacklevel=2) @@ -263,6 +261,11 @@ class ProviderCreate(ProviderBase): base_url: str | None = Field(None, description="Base URL used for requests to the provider.") api_version: str | None = Field(None, description="API version used 
for requests to the provider.") + @field_validator("api_key", "access_key", mode="before") + @classmethod + def strip_whitespace(cls, v: str | None) -> str | None: + return v.strip() if isinstance(v, str) else v + class ProviderUpdate(ProviderBase): api_key: str = Field(..., description="API key or secret key used for requests to the provider.") @@ -271,6 +274,11 @@ class ProviderUpdate(ProviderBase): base_url: str | None = Field(None, description="Base URL used for requests to the provider.") api_version: str | None = Field(None, description="API version used for requests to the provider.") + @field_validator("api_key", "access_key", mode="before") + @classmethod + def strip_whitespace(cls, v: str | None) -> str | None: + return v.strip() if isinstance(v, str) else v + class ProviderCheck(BaseModel): provider_type: ProviderType = Field(..., description="The type of the provider.") @@ -279,3 +287,8 @@ class ProviderCheck(BaseModel): region: str | None = Field(None, description="Region used for requests to the provider.") base_url: str | None = Field(None, description="Base URL used for requests to the provider.") api_version: str | None = Field(None, description="API version used for requests to the provider.") + + @field_validator("api_key", "access_key", mode="before") + @classmethod + def strip_whitespace(cls, v: str | None) -> str | None: + return v.strip() if isinstance(v, str) else v diff --git a/letta/schemas/providers/chatgpt_oauth.py b/letta/schemas/providers/chatgpt_oauth.py index 2f7f71a2..43a9b2f4 100644 --- a/letta/schemas/providers/chatgpt_oauth.py +++ b/letta/schemas/providers/chatgpt_oauth.py @@ -36,6 +36,8 @@ TOKEN_REFRESH_BUFFER_SECONDS = 300 # Model list based on opencode-openai-codex-auth plugin presets # Reasoning effort levels are configured via llm_config.reasoning_effort CHATGPT_MODELS = [ + # GPT-5.3 codex + {"name": "gpt-5.3-codex", "context_window": 272000}, # GPT-5.2 models (supports none/low/medium/high/xhigh reasoning) {"name": 
"gpt-5.2", "context_window": 272000}, {"name": "gpt-5.2-codex", "context_window": 272000}, @@ -308,15 +310,21 @@ class ChatGPTOAuthProvider(Provider): ) def get_default_max_output_tokens(self, model_name: str) -> int: - """Get the default max output tokens for ChatGPT models.""" + """Get the default max output tokens for ChatGPT models. + + References: + - https://developers.openai.com/api/docs/models/gpt-5 + - https://developers.openai.com/api/docs/models/gpt-5-codex + - https://developers.openai.com/api/docs/models/gpt-5.1-codex-max + """ + # GPT-5 family (gpt-5, gpt-5.x, codex variants): 128k max output tokens + if "gpt-5" in model_name: + return 128000 # Reasoning models (o-series) have higher limits if model_name.startswith("o1") or model_name.startswith("o3") or model_name.startswith("o4"): return 100000 - # GPT-5.x models - elif "gpt-5" in model_name: - return 16384 # GPT-4 models - elif "gpt-4" in model_name: + if "gpt-4" in model_name: return 16384 return 4096 diff --git a/letta/schemas/providers/lmstudio.py b/letta/schemas/providers/lmstudio.py index c656f188..12079b9c 100644 --- a/letta/schemas/providers/lmstudio.py +++ b/letta/schemas/providers/lmstudio.py @@ -92,7 +92,7 @@ class LMStudioOpenAIProvider(OpenAIProvider): check = self._do_model_checks_for_name_and_context_size(model, length_key="max_context_length") if check is None: continue - model_name, context_window_size = check + model_name, _context_window_size = check configs.append( EmbeddingConfig( diff --git a/letta/schemas/providers/minimax.py b/letta/schemas/providers/minimax.py index 488c578e..28e83681 100644 --- a/letta/schemas/providers/minimax.py +++ b/letta/schemas/providers/minimax.py @@ -32,6 +32,12 @@ MODEL_LIST = [ "max_output": 128000, "description": "Agentic capabilities, advanced reasoning", }, + { + "name": "MiniMax-M2.5", + "context_window": 200000, + "max_output": 128000, + "description": "Peak Performance. Ultimate Value. 
Master the Complex", + }, ] @@ -98,6 +104,8 @@ class MiniMaxProvider(Provider): max_tokens=model["max_output"], # MiniMax models support native thinking, similar to Claude's extended thinking put_inner_thoughts_in_kwargs=True, + # MiniMax models support parallel tool calling via Anthropic-compatible API + parallel_tool_calls=True, provider_name=self.name, provider_category=self.provider_category, ) diff --git a/letta/schemas/providers/openai.py b/letta/schemas/providers/openai.py index a1bdbb26..c4c979e4 100644 --- a/letta/schemas/providers/openai.py +++ b/letta/schemas/providers/openai.py @@ -1,10 +1,10 @@ from typing import Literal -from openai import AsyncOpenAI, AuthenticationError +from openai import AsyncOpenAI, AuthenticationError, PermissionDeniedError from pydantic import Field from letta.constants import DEFAULT_EMBEDDING_CHUNK_SIZE, LLM_MAX_CONTEXT_WINDOW -from letta.errors import ErrorCode, LLMAuthenticationError, LLMError +from letta.errors import ErrorCode, LLMAuthenticationError, LLMError, LLMPermissionDeniedError from letta.log import get_logger from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ProviderCategory, ProviderType @@ -38,6 +38,15 @@ class OpenAIProvider(Provider): await client.models.list() except AuthenticationError as e: raise LLMAuthenticationError(message=f"Failed to authenticate with OpenAI: {e}", code=ErrorCode.UNAUTHENTICATED) + except PermissionDeniedError as e: + raise LLMPermissionDeniedError(message=f"Permission denied by OpenAI: {e}", code=ErrorCode.PERMISSION_DENIED) + except AttributeError as e: + if "_set_private_attributes" in str(e): + raise LLMError( + message=f"OpenAI-compatible endpoint at {self.base_url} returned an unexpected non-JSON response. 
Verify the base URL and that the endpoint is reachable.", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) + raise LLMError(message=f"{e}", code=ErrorCode.INTERNAL_SERVER_ERROR) except Exception as e: raise LLMError(message=f"{e}", code=ErrorCode.INTERNAL_SERVER_ERROR) diff --git a/letta/schemas/providers/openrouter.py b/letta/schemas/providers/openrouter.py index 7f1ba419..10136044 100644 --- a/letta/schemas/providers/openrouter.py +++ b/letta/schemas/providers/openrouter.py @@ -1,9 +1,9 @@ from typing import Literal -from openai import AsyncOpenAI, AuthenticationError +from openai import AsyncOpenAI, AuthenticationError, PermissionDeniedError from pydantic import Field -from letta.errors import ErrorCode, LLMAuthenticationError, LLMError +from letta.errors import ErrorCode, LLMAuthenticationError, LLMError, LLMPermissionDeniedError from letta.log import get_logger from letta.schemas.enums import ProviderCategory, ProviderType from letta.schemas.llm_config import LLMConfig @@ -41,6 +41,15 @@ class OpenRouterProvider(OpenAIProvider): await client.models.list() except AuthenticationError as e: raise LLMAuthenticationError(message=f"Failed to authenticate with OpenRouter: {e}", code=ErrorCode.UNAUTHENTICATED) + except PermissionDeniedError as e: + raise LLMPermissionDeniedError(message=f"Permission denied by OpenRouter: {e}", code=ErrorCode.PERMISSION_DENIED) + except AttributeError as e: + if "_set_private_attributes" in str(e): + raise LLMError( + message=f"OpenRouter endpoint at {self.base_url} returned an unexpected non-JSON response. 
Verify the base URL and API key.", + code=ErrorCode.INTERNAL_SERVER_ERROR, + ) + raise LLMError(message=f"{e}", code=ErrorCode.INTERNAL_SERVER_ERROR) except Exception as e: raise LLMError(message=f"{e}", code=ErrorCode.INTERNAL_SERVER_ERROR) @@ -84,7 +93,7 @@ class OpenRouterProvider(OpenAIProvider): model_name = model["id"] # OpenRouter returns context_length in the model listing - if "context_length" in model and model["context_length"]: + if model.get("context_length"): context_window_size = model["context_length"] else: context_window_size = self.get_model_context_window_size(model_name) diff --git a/letta/schemas/providers/sglang.py b/letta/schemas/providers/sglang.py index 657c2e38..48d5d013 100644 --- a/letta/schemas/providers/sglang.py +++ b/letta/schemas/providers/sglang.py @@ -15,30 +15,12 @@ from letta.schemas.providers.base import Provider class SGLangProvider(Provider): - provider_type: Literal[ProviderType.sglang] = Field( - ProviderType.sglang, - description="The type of the provider." - ) - provider_category: ProviderCategory = Field( - ProviderCategory.base, - description="The category of the provider (base or byok)" - ) - base_url: str = Field( - ..., - description="Base URL for the SGLang API (e.g., http://localhost:30000)." - ) - api_key: str | None = Field( - None, - description="API key for the SGLang API (optional for local instances)." - ) - default_prompt_formatter: str | None = Field( - default=None, - description="Default prompt formatter (aka model wrapper)." - ) - handle_base: str | None = Field( - None, - description="Custom handle base name for model handles." 
- ) + provider_type: Literal[ProviderType.sglang] = Field(ProviderType.sglang, description="The type of the provider.") + provider_category: ProviderCategory = Field(ProviderCategory.base, description="The category of the provider (base or byok)") + base_url: str = Field(..., description="Base URL for the SGLang API (e.g., http://localhost:30000).") + api_key: str | None = Field(None, description="API key for the SGLang API (optional for local instances).") + default_prompt_formatter: str | None = Field(default=None, description="Default prompt formatter (aka model wrapper).") + handle_base: str | None = Field(None, description="Custom handle base name for model handles.") async def list_llm_models_async(self) -> list[LLMConfig]: from letta.llm_api.openai import openai_get_model_list_async @@ -65,7 +47,7 @@ class SGLangProvider(Provider): model_endpoint_type="openai", # SGLang is OpenAI-compatible model_endpoint=base_url, model_wrapper=self.default_prompt_formatter, - context_window=model.get("max_model_len", 8192), + context_window=model.get("max_model_len", 32768), handle=self.get_handle(model_name, base_name=self.handle_base) if self.handle_base else self.get_handle(model_name), max_tokens=self.get_default_max_output_tokens(model_name), provider_name=self.name, diff --git a/letta/schemas/providers/together.py b/letta/schemas/providers/together.py index 013afffe..f86636b1 100644 --- a/letta/schemas/providers/together.py +++ b/letta/schemas/providers/together.py @@ -11,7 +11,7 @@ logger = get_logger(__name__) from pydantic import Field from letta.constants import MIN_CONTEXT_WINDOW -from letta.errors import ErrorCode, LLMAuthenticationError +from letta.errors import ErrorCode, LLMAuthenticationError, LLMPermissionDeniedError from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ProviderCategory, ProviderType from letta.schemas.llm_config import LLMConfig @@ -35,8 +35,6 @@ class TogetherProvider(OpenAIProvider): return 
self._list_llm_models(models) async def list_embedding_models_async(self) -> list[EmbeddingConfig]: - import warnings - logger.warning( "Letta does not currently support listing embedding models for Together. Please " "contact support or reach out via GitHub or Discord to get support." @@ -99,5 +97,8 @@ class TogetherProvider(OpenAIProvider): try: await self.list_llm_models_async() + except (LLMAuthenticationError, LLMPermissionDeniedError): + # Re-raise specific LLM errors as-is + raise except Exception as e: raise LLMAuthenticationError(message=f"Failed to authenticate with Together: {e}", code=ErrorCode.UNAUTHENTICATED) diff --git a/letta/schemas/providers/zai.py b/letta/schemas/providers/zai.py index f62d119d..8682e4b1 100644 --- a/letta/schemas/providers/zai.py +++ b/letta/schemas/providers/zai.py @@ -16,6 +16,8 @@ MODEL_CONTEXT_WINDOWS = { "glm-4.5": 128000, "glm-4.6": 200000, "glm-4.7": 200000, + "glm-5": 200000, + "glm-5-code": 200000, } diff --git a/letta/schemas/source.py b/letta/schemas/source.py index 26a533e8..3c874ad9 100644 --- a/letta/schemas/source.py +++ b/letta/schemas/source.py @@ -3,7 +3,6 @@ from typing import Optional from pydantic import Field -from letta.helpers.tpuf_client import should_use_tpuf from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import PrimitiveType, VectorDBProvider from letta.schemas.letta_base import LettaBase diff --git a/letta/schemas/step.py b/letta/schemas/step.py index 83126a96..0c525547 100644 --- a/letta/schemas/step.py +++ b/letta/schemas/step.py @@ -25,11 +25,21 @@ class Step(StepBase): provider_name: Optional[str] = Field(None, description="The name of the provider used for this step.") provider_category: Optional[str] = Field(None, description="The category of the provider used for this step.") model: Optional[str] = Field(None, description="The name of the model used for this step.") + model_handle: Optional[str] = Field(None, description="The model handle (e.g., 
'openai/gpt-4o-mini') used for this step.") model_endpoint: Optional[str] = Field(None, description="The model endpoint url used for this step.") context_window_limit: Optional[int] = Field(None, description="The context window limit configured for this step.") completion_tokens: Optional[int] = Field(None, description="The number of tokens generated by the agent during this step.") prompt_tokens: Optional[int] = Field(None, description="The number of tokens in the prompt during this step.") total_tokens: Optional[int] = Field(None, description="The total number of tokens processed by the agent during this step.") + cached_input_tokens: Optional[int] = Field( + None, description="The number of input tokens served from cache. None if not reported by provider." + ) + cache_write_tokens: Optional[int] = Field( + None, description="The number of input tokens written to cache (Anthropic only). None if not reported by provider." + ) + reasoning_tokens: Optional[int] = Field( + None, description="The number of reasoning/thinking tokens generated. None if not reported by provider." + ) completion_tokens_details: Optional[Dict] = Field(None, description="Detailed completion token breakdown (e.g., reasoning_tokens).") prompt_tokens_details: Optional[Dict] = Field( None, description="Detailed prompt token breakdown (e.g., cached_tokens, cache_read_tokens, cache_creation_tokens)." 
diff --git a/letta/schemas/tool.py b/letta/schemas/tool.py index 9b94f82b..9f7f6853 100644 --- a/letta/schemas/tool.py +++ b/letta/schemas/tool.py @@ -20,7 +20,7 @@ from letta.functions.functions import get_json_schema_from_module from letta.functions.mcp_client.types import MCPTool from letta.functions.schema_generator import generate_tool_schema_for_mcp from letta.log import get_logger -from letta.schemas.enums import ToolSourceType, ToolType +from letta.schemas.enums import ToolType from letta.schemas.letta_base import LettaBase from letta.schemas.npm_requirement import NpmRequirement from letta.schemas.pip_requirement import PipRequirement @@ -158,7 +158,7 @@ class ToolCreate(LettaBase): description = mcp_tool.description source_type = "python" tags = [f"{MCP_TOOL_TAG_NAME_PREFIX}:{mcp_server_name}"] - wrapper_func_name, wrapper_function_str = generate_mcp_tool_wrapper(mcp_tool.name) + _wrapper_func_name, wrapper_function_str = generate_mcp_tool_wrapper(mcp_tool.name) return cls( description=description, diff --git a/letta/schemas/usage.py b/letta/schemas/usage.py index d2f5191d..00d59bc4 100644 --- a/letta/schemas/usage.py +++ b/letta/schemas/usage.py @@ -2,10 +2,10 @@ from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Uni from pydantic import BaseModel, Field -from letta.schemas.message import Message - if TYPE_CHECKING: + from letta.schemas.enums import ProviderType from letta.schemas.openai.chat_completion_response import ( + UsageStatistics, UsageStatisticsCompletionTokenDetails, UsageStatisticsPromptTokenDetails, ) @@ -127,6 +127,12 @@ class LettaUsageStatistics(BaseModel): None, description="The number of reasoning/thinking tokens generated. None if not reported by provider." 
) + # Context window tracking + context_tokens: Optional[int] = Field( + None, + description="Estimate of tokens currently in the context window.", + ) + def to_usage(self, provider_type: Optional["ProviderType"] = None) -> "UsageStatistics": """Convert to UsageStatistics (OpenAI-compatible format). diff --git a/letta/schemas/user.py b/letta/schemas/user.py index 45aa871e..283d75b6 100644 --- a/letta/schemas/user.py +++ b/letta/schemas/user.py @@ -6,6 +6,7 @@ from pydantic import Field from letta.constants import DEFAULT_ORG_ID from letta.schemas.enums import PrimitiveType from letta.schemas.letta_base import LettaBase +from letta.validators import UserId class UserBase(LettaBase): @@ -29,6 +30,6 @@ class UserCreate(UserBase): class UserUpdate(UserBase): - id: str = Field(..., description="The id of the user to update.") + id: UserId = Field(..., description="The id of the user to update.") name: Optional[str] = Field(None, description="The new name of the user.") organization_id: Optional[str] = Field(None, description="The new organization id of the user.") diff --git a/letta/serialize_schemas/__init__.py b/letta/serialize_schemas/__init__.py index 1f6be200..b2082c2f 100644 --- a/letta/serialize_schemas/__init__.py +++ b/letta/serialize_schemas/__init__.py @@ -1 +1 @@ -from letta.serialize_schemas.marshmallow_agent import MarshmallowAgentSchema +from letta.serialize_schemas.marshmallow_agent import MarshmallowAgentSchema as MarshmallowAgentSchema diff --git a/letta/serialize_schemas/marshmallow_agent.py b/letta/serialize_schemas/marshmallow_agent.py index ddcaca38..53014659 100644 --- a/letta/serialize_schemas/marshmallow_agent.py +++ b/letta/serialize_schemas/marshmallow_agent.py @@ -112,7 +112,7 @@ class MarshmallowAgentSchema(BaseSchema): .all() ) # combine system message with step messages - msgs = [system_msg] + step_msgs if system_msg else step_msgs + msgs = [system_msg, *step_msgs] if system_msg else step_msgs else: # no user messages, just return system 
message msgs = [system_msg] if system_msg else [] @@ -147,7 +147,7 @@ class MarshmallowAgentSchema(BaseSchema): .all() ) # combine system message with step messages - msgs = [system_msg] + step_msgs if system_msg else step_msgs + msgs = [system_msg, *step_msgs] if system_msg else step_msgs else: # no user messages, just return system message msgs = [system_msg] if system_msg else [] @@ -231,7 +231,8 @@ class MarshmallowAgentSchema(BaseSchema): class Meta(BaseSchema.Meta): model = Agent - exclude = BaseSchema.Meta.exclude + ( + exclude = ( + *BaseSchema.Meta.exclude, "project_id", "template_id", "base_template_id", diff --git a/letta/serialize_schemas/marshmallow_agent_environment_variable.py b/letta/serialize_schemas/marshmallow_agent_environment_variable.py index 371614a8..7a4b04d2 100644 --- a/letta/serialize_schemas/marshmallow_agent_environment_variable.py +++ b/letta/serialize_schemas/marshmallow_agent_environment_variable.py @@ -18,4 +18,4 @@ class SerializedAgentEnvironmentVariableSchema(BaseSchema): class Meta(BaseSchema.Meta): model = AgentEnvironmentVariable - exclude = BaseSchema.Meta.exclude + ("agent",) + exclude = (*BaseSchema.Meta.exclude, "agent") diff --git a/letta/serialize_schemas/marshmallow_block.py b/letta/serialize_schemas/marshmallow_block.py index 082cd328..b92e91cf 100644 --- a/letta/serialize_schemas/marshmallow_block.py +++ b/letta/serialize_schemas/marshmallow_block.py @@ -34,4 +34,4 @@ class SerializedBlockSchema(BaseSchema): class Meta(BaseSchema.Meta): model = Block - exclude = BaseSchema.Meta.exclude + ("agents", "identities", "is_deleted", "groups", "organization") + exclude = (*BaseSchema.Meta.exclude, "agents", "identities", "is_deleted", "groups", "organization") diff --git a/letta/serialize_schemas/marshmallow_message.py b/letta/serialize_schemas/marshmallow_message.py index 75678bd7..5d03985d 100644 --- a/letta/serialize_schemas/marshmallow_message.py +++ b/letta/serialize_schemas/marshmallow_message.py @@ -37,4 +37,4 @@ class 
SerializedMessageSchema(BaseSchema): class Meta(BaseSchema.Meta): model = Message - exclude = BaseSchema.Meta.exclude + ("step", "job_message", "otid", "is_deleted", "organization") + exclude = (*BaseSchema.Meta.exclude, "step", "job_message", "otid", "is_deleted", "organization") diff --git a/letta/serialize_schemas/marshmallow_tag.py b/letta/serialize_schemas/marshmallow_tag.py index be19b90c..2b03be98 100644 --- a/letta/serialize_schemas/marshmallow_tag.py +++ b/letta/serialize_schemas/marshmallow_tag.py @@ -25,4 +25,4 @@ class SerializedAgentTagSchema(BaseSchema): class Meta(BaseSchema.Meta): model = AgentsTags - exclude = BaseSchema.Meta.exclude + ("agent",) + exclude = (*BaseSchema.Meta.exclude, "agent") diff --git a/letta/serialize_schemas/marshmallow_tool.py b/letta/serialize_schemas/marshmallow_tool.py index a6d1c91e..0d8471bf 100644 --- a/letta/serialize_schemas/marshmallow_tool.py +++ b/letta/serialize_schemas/marshmallow_tool.py @@ -34,4 +34,4 @@ class SerializedToolSchema(BaseSchema): class Meta(BaseSchema.Meta): model = Tool - exclude = BaseSchema.Meta.exclude + ("is_deleted", "organization") + exclude = (*BaseSchema.Meta.exclude, "is_deleted", "organization") diff --git a/letta/server/db.py b/letta/server/db.py index 35ca098e..e7c40db3 100644 --- a/letta/server/db.py +++ b/letta/server/db.py @@ -3,7 +3,7 @@ import uuid from contextlib import asynccontextmanager from typing import AsyncGenerator -from sqlalchemy import NullPool +from sqlalchemy import NullPool, text from sqlalchemy.ext.asyncio import ( AsyncEngine, AsyncSession, @@ -12,8 +12,11 @@ from sqlalchemy.ext.asyncio import ( ) from letta.database_utils import get_database_uri_for_context +from letta.log import get_logger from letta.settings import settings +logger = get_logger(__name__) + # Convert PostgreSQL URI to async format using common utility async_pg_uri = get_database_uri_for_context(settings.letta_pg_uri, "async") @@ -75,22 +78,46 @@ class DatabaseRegistry: a BaseException (not 
Exception) in Python 3.8+. Without this, cancelled tasks would skip rollback() and return connections to the pool with uncommitted transactions, causing "idle in transaction" connection leaks. + + Implements retry logic for transient connection errors (e.g., SSL handshake failures). """ - async with async_session_factory() as session: + max_retries = 3 + retry_delay = 0.1 + + for attempt in range(max_retries): try: - yield session - await session.commit() - except asyncio.CancelledError: - # Task was cancelled (client disconnect, timeout, explicit cancellation) - # Must rollback to avoid returning connection with open transaction - await session.rollback() - raise - except Exception: - await session.rollback() - raise - finally: - session.expunge_all() - await session.close() + async with async_session_factory() as session: + try: + result = await session.execute(text("SELECT pg_backend_pid(), current_setting('statement_timeout')")) + pid, timeout = result.one() + logger.warning(f"[stmt_timeout_debug] pid={pid} statement_timeout={timeout}") + await session.rollback() + yield session + await session.commit() + except asyncio.CancelledError: + # Task was cancelled (client disconnect, timeout, explicit cancellation) + # Must rollback to avoid returning connection with open transaction + await session.rollback() + raise + except Exception: + await session.rollback() + raise + finally: + session.expunge_all() + await session.close() + return + except ConnectionError as e: + if attempt < max_retries - 1: + logger.warning(f"Database connection error (attempt {attempt + 1}/{max_retries}): {e}. Retrying in {retry_delay}s...") + await asyncio.sleep(retry_delay) + retry_delay *= 2 + else: + logger.error(f"Database connection failed after {max_retries} attempts: {e}") + from letta.errors import LettaServiceUnavailableError + + raise LettaServiceUnavailableError( + "Database connection temporarily unavailable. 
Please retry your request.", service_name="database" + ) from e # Create singleton instance to match existing interface diff --git a/letta/server/rest_api/app.py b/letta/server/rest_api/app.py index 19bdb656..46415429 100644 --- a/letta/server/rest_api/app.py +++ b/letta/server/rest_api/app.py @@ -5,27 +5,28 @@ import logging import os import platform import sys -import threading from contextlib import asynccontextmanager from functools import partial from pathlib import Path from typing import Optional +import anyio import uvicorn # Enable Python fault handler to get stack traces on segfaults faulthandler.enable() +import orjson from fastapi import FastAPI, Request from fastapi.exceptions import RequestValidationError from fastapi.responses import JSONResponse, ORJSONResponse from marshmallow import ValidationError -from sqlalchemy.exc import IntegrityError, OperationalError +from sqlalchemy.exc import DBAPIError, IntegrityError, OperationalError from starlette.middleware.cors import CORSMiddleware from letta.__init__ import __version__ as letta_version from letta.agents.exceptions import IncompatibleAgentType -from letta.constants import ADMIN_PREFIX, API_PREFIX, OPENAI_API_PREFIX +from letta.constants import ADMIN_PREFIX, API_PREFIX from letta.errors import ( AgentExportIdMappingError, AgentExportProcessingError, @@ -33,6 +34,7 @@ from letta.errors import ( AgentNotFoundForExportError, BedrockPermissionError, ConcurrentUpdateError, + ContextWindowExceededError, ConversationBusyError, EmbeddingConfigRequiredError, HandleNotFoundError, @@ -49,17 +51,28 @@ from letta.errors import ( LettaUnsupportedFileUploadError, LettaUserNotFoundError, LLMAuthenticationError, + LLMBadRequestError, LLMError, + LLMInsufficientCreditsError, LLMProviderOverloaded, LLMRateLimitError, LLMTimeoutError, + MemoryRepoBusyError, NoActiveRunsToCancelError, PendingApprovalError, ) +from letta.helpers.json_helpers import sanitize_unicode_surrogates from letta.helpers.pinecone_utils import 
get_pinecone_indices, should_use_pinecone, upsert_pinecone_indices from letta.jobs.scheduler import start_scheduler_with_leader_election from letta.log import get_logger -from letta.orm.errors import DatabaseTimeoutError, ForeignKeyConstraintViolationError, NoResultFound, UniqueConstraintViolationError +from letta.orm.errors import ( + DatabaseDeadlockError, + DatabaseLockNotAvailableError, + DatabaseTimeoutError, + ForeignKeyConstraintViolationError, + NoResultFound, + UniqueConstraintViolationError, +) from letta.otel.tracing import get_trace_id from letta.schemas.letta_message import create_letta_error_message_schema, create_letta_message_union_schema from letta.schemas.letta_message_content import ( @@ -69,7 +82,31 @@ from letta.schemas.letta_message_content import ( create_letta_user_message_content_union_schema, ) from letta.server.constants import REST_DEFAULT_PORT -from letta.server.db import db_registry + + +class SafeORJSONResponse(ORJSONResponse): + """ORJSONResponse that handles Python strings containing UTF-8 surrogates. + + LLM responses or user input can occasionally contain surrogate characters + (U+D800–U+DFFF) which are valid in Python str but illegal in UTF-8. + Standard orjson serialisation rejects them with: + TypeError: str is not valid UTF-8: surrogates not allowed + This subclass catches that error, strips the surrogates, and retries. 
+ """ + + def render(self, content) -> bytes: + try: + return super().render(content) + except TypeError as exc: + if "surrogates" not in str(exc): + raise + sanitized = sanitize_unicode_surrogates(content) + return orjson.dumps( + sanitized, + option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SERIALIZE_NUMPY, + ) + + from letta.server.global_exception_handler import setup_global_exception_handlers # NOTE(charles): these are extra routes that are not part of v1 but we still need to mount to pass tests @@ -192,6 +229,16 @@ async def lifespan(app_: FastAPI): logger.info(f"[Worker {worker_id}] Starting scheduler with leader election") global server await server.init_async(init_with_default_org_and_user=not settings.no_default_actor) + + # Set server instance for git HTTP endpoints + try: + from letta.server.rest_api.routers.v1.git_http import set_server_instance + + set_server_instance(server) + logger.info(f"[Worker {worker_id}] Git HTTP server instance set") + except Exception as e: + logger.warning(f"[Worker {worker_id}] Failed to set git HTTP server instance: {e}") + try: await start_scheduler_with_leader_election(server) logger.info(f"[Worker {worker_id}] Scheduler initialization completed") @@ -203,6 +250,15 @@ async def lifespan(app_: FastAPI): # Cleanup on shutdown logger.info(f"[Worker {worker_id}] Starting lifespan shutdown") + # Stop watchdog thread (important for clean test/worker shutdown) + try: + from letta.monitoring.event_loop_watchdog import stop_watchdog + + stop_watchdog() + logger.info(f"[Worker {worker_id}] Event loop watchdog stopped") + except Exception as e: + logger.warning(f"[Worker {worker_id}] Failed to stop watchdog: {e}") + try: from letta.jobs.scheduler import shutdown_scheduler_and_release_lock @@ -349,7 +405,7 @@ def create_application() -> "FastAPI": version=letta_version, debug=debug_mode, # if True, the stack trace will be printed in the response lifespan=lifespan, - default_response_class=ORJSONResponse, # Use orjson for 10x faster JSON 
serialization + default_response_class=SafeORJSONResponse, # Use orjson for 10x faster JSON serialization, with surrogate safety ) # === Global Exception Handlers === @@ -359,6 +415,12 @@ def create_application() -> "FastAPI": # === Exception Handlers === # TODO (cliandy): move to separate file + @app.exception_handler(anyio.BrokenResourceError) + @app.exception_handler(anyio.ClosedResourceError) + async def client_disconnect_handler(request: Request, exc: Exception): + logger.info(f"Client disconnected: {request.method} {request.url.path}") + return JSONResponse(status_code=499, content={"detail": "Client disconnected"}) + @app.exception_handler(Exception) async def generic_error_handler(request: Request, exc: Exception): # Log with structured context @@ -487,6 +549,7 @@ def create_application() -> "FastAPI": app.add_exception_handler(AgentFileImportError, _error_handler_400) app.add_exception_handler(EmbeddingConfigRequiredError, _error_handler_400) app.add_exception_handler(LettaImageFetchError, _error_handler_400) + app.add_exception_handler(ContextWindowExceededError, _error_handler_400) app.add_exception_handler(ValueError, _error_handler_400) # 404 Not Found errors @@ -509,6 +572,7 @@ def create_application() -> "FastAPI": app.add_exception_handler(IntegrityError, _error_handler_409) app.add_exception_handler(ConcurrentUpdateError, _error_handler_409) app.add_exception_handler(ConversationBusyError, _error_handler_409) + app.add_exception_handler(MemoryRepoBusyError, _error_handler_409) app.add_exception_handler(PendingApprovalError, _error_handler_409) app.add_exception_handler(NoActiveRunsToCancelError, _error_handler_409) @@ -527,6 +591,44 @@ def create_application() -> "FastAPI": app.add_exception_handler(LettaServiceUnavailableError, _error_handler_503) app.add_exception_handler(LLMProviderOverloaded, _error_handler_503) + @app.exception_handler(DatabaseLockNotAvailableError) + async def database_lock_not_available_handler(request: Request, exc: 
DatabaseLockNotAvailableError): + logger.warning(f"Lock not available: {exc}. Original exception: {exc.original_exception}") + return JSONResponse( + status_code=409, + content={"detail": "The resource is currently locked by another operation. Please retry shortly."}, + headers={"Retry-After": "1"}, + ) + + @app.exception_handler(DatabaseDeadlockError) + async def database_deadlock_error_handler(request: Request, exc: DatabaseDeadlockError): + logger.error(f"Deadlock detected: {exc}. Original exception: {exc.original_exception}") + return JSONResponse( + status_code=409, + content={"detail": "A database deadlock was detected. Please retry your request."}, + headers={"Retry-After": "1"}, + ) + + @app.exception_handler(DBAPIError) + async def dbapi_error_handler(request: Request, exc: DBAPIError): + from asyncpg.exceptions import DeadlockDetectedError + + if isinstance(exc.orig, DeadlockDetectedError): + logger.error(f"Deadlock detected (DBAPIError wrapper): {exc}") + return JSONResponse( + status_code=409, + content={"detail": "A database deadlock was detected. Please retry your request."}, + headers={"Retry-After": "1"}, + ) + + logger.error(f"Unhandled DBAPIError: {exc}", exc_info=True) + if SENTRY_ENABLED: + sentry_sdk.capture_exception(exc) + return JSONResponse( + status_code=500, + content={"detail": "A database error occurred."}, + ) + @app.exception_handler(IncompatibleAgentType) async def handle_incompatible_agent_type(request: Request, exc: IncompatibleAgentType): logger.error("Incompatible agent types. Expected: %s, Actual: %s", exc.expected_type, exc.actual_type) @@ -583,12 +685,37 @@ def create_application() -> "FastAPI": @app.exception_handler(LLMRateLimitError) async def llm_rate_limit_error_handler(request: Request, exc: LLMRateLimitError): + is_byok = exc.details.get("is_byok") if isinstance(exc.details, dict) else None + if is_byok: + message = ( + "Rate limit exceeded on your API key. 
Please check your provider's rate limits and billing, or reduce request frequency." + ) + else: + message = "Rate limit exceeded for LLM model provider. Please wait before making another request." return JSONResponse( status_code=429, content={ "error": { "type": "llm_rate_limit", - "message": "Rate limit exceeded for LLM model provider. Please wait before making another request.", + "message": message, + "detail": str(exc), + } + }, + ) + + @app.exception_handler(LLMInsufficientCreditsError) + async def llm_insufficient_credits_handler(request: Request, exc: LLMInsufficientCreditsError): + is_byok = exc.details.get("is_byok") if isinstance(exc.details, dict) else None + if is_byok: + message = "Insufficient credits on your API key. Please add credits with your LLM provider." + else: + message = "Insufficient credits for LLM request. Please check your account." + return JSONResponse( + status_code=402, + content={ + "error": { + "type": "llm_insufficient_credits", + "message": message, "detail": str(exc), } }, @@ -620,6 +747,19 @@ def create_application() -> "FastAPI": }, ) + @app.exception_handler(LLMBadRequestError) + async def llm_bad_request_error_handler(request: Request, exc: LLMBadRequestError): + return JSONResponse( + status_code=400, + content={ + "error": { + "type": "llm_bad_request", + "message": "The request to the LLM model provider was invalid.", + "detail": str(exc), + } + }, + ) + @app.exception_handler(LLMError) async def llm_error_handler(request: Request, exc: LLMError): return JSONResponse( @@ -748,7 +888,7 @@ def start_server( import uvloop asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) - except: + except Exception: pass if (os.getenv("LOCAL_HTTPS") == "true") or "--localhttps" in sys.argv: diff --git a/letta/server/rest_api/auth/index.py b/letta/server/rest_api/auth/index.py index 6ee6f3cc..1e982051 100644 --- a/letta/server/rest_api/auth/index.py +++ b/letta/server/rest_api/auth/index.py @@ -22,7 +22,7 @@ class 
AuthRequest(BaseModel): def setup_auth_router(server: SyncServer, interface: QueuingInterface, password: str) -> APIRouter: - @router.post("/auth", tags=["auth"], response_model=AuthResponse) + @router.post("/auth", tags=["auth"]) def authenticate_user(request: AuthRequest) -> AuthResponse: """ Authenticates the user and sends response with User related data. diff --git a/letta/server/rest_api/interface.py b/letta/server/rest_api/interface.py index 86ffc99e..1d290e1b 100644 --- a/letta/server/rest_api/interface.py +++ b/letta/server/rest_api/interface.py @@ -1227,7 +1227,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface): # } try: func_args = parse_json(function_call.function.arguments) - except: + except Exception: func_args = function_call.function.arguments # processed_chunk = { # "function_call": f"{function_call.function.name}({func_args})", @@ -1262,7 +1262,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface): else: try: func_args = parse_json(function_call.function.arguments) - except: + except Exception: logger.warning(f"Failed to parse function arguments: {function_call.function.arguments}") func_args = {} diff --git a/letta/server/rest_api/middleware/logging.py b/letta/server/rest_api/middleware/logging.py index 16c33f7c..b85519b7 100644 --- a/letta/server/rest_api/middleware/logging.py +++ b/letta/server/rest_api/middleware/logging.py @@ -125,6 +125,12 @@ class LoggingMiddleware(BaseHTTPMiddleware): return response except Exception as exc: + import anyio + + if isinstance(exc, (anyio.BrokenResourceError, anyio.ClosedResourceError)): + logger.info(f"Client disconnected during request: {request.method} {request.url.path}") + raise + # Extract request context request_context = { "method": request.method, diff --git a/letta/server/rest_api/proxy_helpers.py b/letta/server/rest_api/proxy_helpers.py index b8627fe8..644132b5 100644 --- a/letta/server/rest_api/proxy_helpers.py +++ b/letta/server/rest_api/proxy_helpers.py @@ -4,7 +4,6 
@@ Shared helper functions for Anthropic-compatible proxy endpoints. These helpers are used by both the Anthropic and Z.ai proxy routers to reduce code duplication. """ -import asyncio import json from fastapi import Request @@ -302,7 +301,7 @@ async def inject_memory_context( # Handle both string and list system prompts if isinstance(existing_system, list): # If it's a list, prepend our context as a text block - modified_data["system"] = existing_system + [{"type": "text", "text": memory_context.rstrip()}] + modified_data["system"] = [*existing_system, {"type": "text", "text": memory_context.rstrip()}] elif existing_system: # If it's a non-empty string, prepend our context modified_data["system"] = memory_context + existing_system @@ -452,8 +451,8 @@ async def backfill_agent_project_id(server, agent, actor, project_id: str): async def get_or_create_claude_code_agent( server, actor, - project_id: str = None, - agent_id: str = None, + project_id: str | None = None, + agent_id: str | None = None, ): """ Get or create a special agent for Claude Code sessions. 
diff --git a/letta/server/rest_api/redis_stream_manager.py b/letta/server/rest_api/redis_stream_manager.py index f5c96168..6ae4086d 100644 --- a/letta/server/rest_api/redis_stream_manager.py +++ b/letta/server/rest_api/redis_stream_manager.py @@ -276,7 +276,7 @@ async def create_background_stream_processor( maybe_stop_reason = json.loads(maybe_json_chunk) if maybe_json_chunk and maybe_json_chunk[0] == "{" else None if maybe_stop_reason and maybe_stop_reason.get("message_type") == "stop_reason": stop_reason = maybe_stop_reason.get("stop_reason") - except: + except Exception: pass # Stream ended naturally - check if we got a proper terminal @@ -313,7 +313,7 @@ async def create_background_stream_processor( # Set a default stop_reason so run status can be mapped in finally stop_reason = StopReasonType.error.value - except RunCancelledException as e: + except RunCancelledException: # Handle cancellation gracefully - don't write error chunk, cancellation event was already sent logger.info(f"Stream processing stopped due to cancellation for run {run_id}") # The cancellation event was already yielded by cancellation_aware_stream_wrapper @@ -361,25 +361,10 @@ async def create_background_stream_processor( # Update run status to reflect terminal outcome if run_manager and actor and final_stop_reason: - # Map stop_reason to run status - if final_stop_reason in [ - StopReasonType.error.value, - StopReasonType.llm_api_error.value, - StopReasonType.invalid_tool_call.value, - StopReasonType.invalid_llm_response.value, - StopReasonType.no_tool_call.value, - ]: - run_status = RunStatus.failed - elif final_stop_reason == StopReasonType.cancelled.value: - run_status = RunStatus.cancelled - elif final_stop_reason in [ - StopReasonType.end_turn.value, - StopReasonType.max_steps.value, - StopReasonType.tool_rule.value, - StopReasonType.requires_approval.value, - ]: - run_status = RunStatus.completed - else: + # Resolve stop_reason using canonical enum mapping to avoid drift. 
+ try: + run_status = StopReasonType(final_stop_reason).run_status + except ValueError: logger.warning(f"Unknown stop_reason '{final_stop_reason}' for run {run_id}, defaulting to completed") run_status = RunStatus.completed diff --git a/letta/server/rest_api/routers/v1/__init__.py b/letta/server/rest_api/routers/v1/__init__.py index c75f715a..f7293eb2 100644 --- a/letta/server/rest_api/routers/v1/__init__.py +++ b/letta/server/rest_api/routers/v1/__init__.py @@ -6,6 +6,7 @@ from letta.server.rest_api.routers.v1.chat_completions import router as chat_com from letta.server.rest_api.routers.v1.conversations import router as conversations_router from letta.server.rest_api.routers.v1.embeddings import router as embeddings_router from letta.server.rest_api.routers.v1.folders import router as folders_router +from letta.server.rest_api.routers.v1.git_http import router as git_http_router from letta.server.rest_api.routers.v1.groups import router as groups_router from letta.server.rest_api.routers.v1.health import router as health_router from letta.server.rest_api.routers.v1.identities import router as identities_router @@ -39,6 +40,7 @@ ROUTERS = [ agents_router, conversations_router, chat_completions_router, + git_http_router, groups_router, identities_router, internal_agents_router, diff --git a/letta/server/rest_api/routers/v1/agents.py b/letta/server/rest_api/routers/v1/agents.py index d480b963..023fe314 100644 --- a/letta/server/rest_api/routers/v1/agents.py +++ b/letta/server/rest_api/routers/v1/agents.py @@ -1,29 +1,23 @@ import asyncio import json -import traceback -from datetime import datetime, timezone +from datetime import datetime from typing import Annotated, Any, Dict, List, Literal, Optional, Union +import orjson from fastapi import APIRouter, Body, Depends, File, Form, Header, HTTPException, Query, Request, UploadFile, status from fastapi.responses import JSONResponse -from marshmallow import ValidationError -from orjson import orjson -from pydantic import 
BaseModel, ConfigDict, Field -from sqlalchemy.exc import IntegrityError, OperationalError +from pydantic import BaseModel, ConfigDict, Field, field_validator from starlette.responses import Response, StreamingResponse from letta.agents.agent_loop import AgentLoop from letta.agents.base_agent_v2 import BaseAgentV2 from letta.agents.letta_agent import LettaAgent -from letta.agents.letta_agent_v2 import LettaAgentV2 from letta.agents.letta_agent_v3 import LettaAgentV3 from letta.constants import DEFAULT_MAX_STEPS, DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, REDIS_RUN_ID_PREFIX from letta.data_sources.redis_client import get_redis_client from letta.errors import ( - AgentExportIdMappingError, - AgentExportProcessingError, - AgentFileImportError, - AgentNotFoundForExportError, + HandleNotFoundError, + LLMError, NoActiveRunsToCancelError, PendingApprovalError, ) @@ -34,10 +28,10 @@ from letta.orm.errors import NoResultFound from letta.otel.context import get_ctx_attributes from letta.otel.metric_registry import MetricRegistry from letta.schemas.agent import AgentRelationships, AgentState, CreateAgent, UpdateAgent -from letta.schemas.agent_file import AgentFileSchema -from letta.schemas.block import BaseBlock, Block, BlockResponse, BlockUpdate +from letta.schemas.agent_file import AgentFileSchema, SkillSchema +from letta.schemas.block import BlockResponse, BlockUpdate from letta.schemas.enums import AgentType, MessageRole, RunStatus -from letta.schemas.file import AgentFileAttachment, FileMetadataBase, PaginatedAgentFiles +from letta.schemas.file import AgentFileAttachment, PaginatedAgentFiles from letta.schemas.group import Group from letta.schemas.job import LettaRequestConfig from letta.schemas.letta_message import LettaMessageUnion, LettaMessageUpdateUnion, MessageType @@ -56,9 +50,10 @@ from letta.schemas.memory import ( from letta.schemas.message import Message, MessageCreate, MessageCreateType, MessageSearchRequest, MessageSearchResult from 
letta.schemas.passage import Passage from letta.schemas.run import Run as PydanticRun, RunUpdate -from letta.schemas.source import BaseSource, Source -from letta.schemas.tool import BaseTool, Tool +from letta.schemas.source import Source +from letta.schemas.tool import Tool from letta.schemas.tool_execution_result import ToolExecutionResult +from letta.schemas.usage import LettaUsageStatistics from letta.schemas.user import User from letta.serialize_schemas.pydantic_agent_schema import AgentSchema from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server @@ -79,6 +74,52 @@ router = APIRouter(prefix="/agents", tags=["agents"]) logger = get_logger(__name__) +# Schemas for direct LLM generation endpoint +class GenerateRequest(BaseModel): + """Request for direct LLM generation without agent processing.""" + + prompt: str = Field( + ..., + description="The prompt/message to send to the LLM", + min_length=1, + ) + + system_prompt: Optional[str] = Field( + None, + description="Optional system prompt to prepend to the conversation", + ) + + override_model: Optional[str] = Field( + None, + description="Model handle to use instead of agent's default (e.g., 'openai/gpt-4', 'anthropic/claude-3-5-sonnet')", + ) + + response_schema: Optional[Dict[str, Any]] = Field( + None, + description=( + "JSON schema for structured output. When provided, the LLM will be forced to return " + "a response matching this schema via tool calling. The schema should follow JSON Schema " + "format with 'properties' and optionally 'required' fields." 
+ ), + ) + + @field_validator("prompt") + @classmethod + def validate_prompt_not_empty(cls, v: str) -> str: + """Ensure prompt is not empty or whitespace-only.""" + if not v or not v.strip(): + raise ValueError("prompt cannot be empty or whitespace-only") + return v + + +class GenerateResponse(BaseModel): + """Response from direct LLM generation.""" + + content: str = Field(..., description="The LLM's response text") + model: str = Field(..., description="The model that generated this response") + usage: LettaUsageStatistics = Field(..., description="Token usage statistics") + + @router.get("/", response_model=list[AgentState], operation_id="list_agents") async def list_agents( name: str | None = Query(None, description="Name of the agent"), @@ -247,6 +288,10 @@ async def export_agent( None, description="Conversation ID to export. If provided, uses messages from this conversation instead of the agent's global message history.", ), + scrub_messages: bool = Query( + False, + description="If True, excludes all messages from the export. 
Useful for sharing agent configs without conversation history.", + ), # do not remove, used to autogeneration of spec # TODO: Think of a better way to export AgentFileSchema spec: AgentFileSchema | None = None, @@ -258,7 +303,59 @@ async def export_agent( if use_legacy_format: raise HTTPException(status_code=400, detail="Legacy format is not supported") actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) - agent_file_schema = await server.agent_serialization_manager.export(agent_ids=[agent_id], actor=actor, conversation_id=conversation_id) + agent_file_schema = await server.agent_serialization_manager.export( + agent_ids=[agent_id], + actor=actor, + conversation_id=conversation_id, + scrub_messages=scrub_messages, + ) + return agent_file_schema.model_dump() + + +class ExportAgentRequest(BaseModel): + """Request body for POST /export endpoint.""" + + skills: List[SkillSchema] = Field( + default_factory=list, + description="Skills to include in the export. Each skill must have a name and files (including SKILL.md).", + ) + conversation_id: Optional[str] = Field( + None, + description="Conversation ID to export. If provided, uses messages from this conversation instead of the agent's global message history.", + ) + scrub_messages: bool = Field( + default=False, + description="If True, excludes all messages from the export. Useful for sharing agent configs without conversation history.", + ) + + +@router.post("/{agent_id}/export", response_class=IndentedORJSONResponse, operation_id="export_agent_with_skills") +async def export_agent_with_skills( + agent_id: str = AgentId, + request: Optional[ExportAgentRequest] = Body(default=None), + server: "SyncServer" = Depends(get_letta_server), + headers: HeaderParams = Depends(get_headers), +) -> JSONResponse: + """ + Export the serialized JSON representation of an agent with optional skills. + + This POST endpoint allows including skills in the export by providing them in the request body. 
+ Skills are resolved client-side and passed as SkillSchema objects containing the skill files. + """ + actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) + + # Use defaults if no request body provided + skills = request.skills if request else [] + conversation_id = request.conversation_id if request else None + scrub_messages = request.scrub_messages if request else False + + agent_file_schema = await server.agent_serialization_manager.export( + agent_ids=[agent_id], + actor=actor, + conversation_id=conversation_id, + skills=skills, + scrub_messages=scrub_messages, + ) return agent_file_schema.model_dump() @@ -782,7 +879,7 @@ async def detach_source( source = await server.source_manager.get_source_by_id(source_id=source_id, actor=actor) block = await server.agent_manager.get_block_with_label_async(agent_id=agent_state.id, block_label=source.name, actor=actor) await server.block_manager.delete_block_async(block.id, actor) - except: + except Exception: pass return agent_state @@ -814,7 +911,7 @@ async def detach_folder_from_agent( source = await server.source_manager.get_source_by_id(source_id=folder_id, actor=actor) block = await server.agent_manager.get_block_with_label_async(agent_id=agent_state.id, block_label=source.name, actor=actor) await server.block_manager.delete_block_async(block.id, actor) - except: + except Exception: pass if is_1_0_sdk_version(headers): @@ -875,7 +972,7 @@ async def open_file_for_agent( visible_content = truncate_file_visible_content(visible_content, True, per_file_view_window_char_limit) # Use enforce_max_open_files_and_open for efficient LRU handling - closed_files, was_already_open, _ = await server.file_agent_manager.enforce_max_open_files_and_open( + closed_files, _was_already_open, _ = await server.file_agent_manager.enforce_max_open_files_and_open( agent_id=agent_id, file_id=file_id, file_name=file_metadata.file_name, @@ -1172,6 +1269,70 @@ async def modify_block_for_agent( return block 
+@router.post( + "/{agent_id}/recompile", + response_model=str, + operation_id="recompile_agent", +) +async def recompile_agent( + agent_id: AgentId, + server: "SyncServer" = Depends(get_letta_server), + headers: HeaderParams = Depends(get_headers), + update_timestamp: bool = Query( + False, + description="If True, update the in-context memory last edit timestamp embedded in the system prompt.", + ), + dry_run: bool = Query( + False, + description="If True, do not persist changes; still returns the compiled system prompt.", + ), +): + """Manually trigger system prompt recompilation for an agent.""" + actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) + + _, system_message, _, _ = await server.agent_manager.rebuild_system_prompt_async( + agent_id=agent_id, + actor=actor, + force=True, + update_timestamp=update_timestamp, + dry_run=dry_run, + ) + + if system_message is None: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"No system message found for agent '{agent_id}'") + + return system_message.to_openai_dict().get("content", "") + + +@router.post( + "/{agent_id}/system-prompt/recompile", + response_model=str, + operation_id="recompile_agent_system_prompt", + deprecated=True, +) +async def recompile_agent_system_prompt( + agent_id: AgentId, + server: "SyncServer" = Depends(get_letta_server), + headers: HeaderParams = Depends(get_headers), + update_timestamp: bool = Query( + False, + description="If True, update the in-context memory last edit timestamp embedded in the system prompt.", + ), + dry_run: bool = Query( + False, + description="If True, do not persist changes; still returns the compiled system prompt.", + ), +): + """Deprecated alias for POST /v1/agents/{agent_id}/recompile.""" + return await recompile_agent( + agent_id=agent_id, + server=server, + headers=headers, + update_timestamp=update_timestamp, + dry_run=dry_run, + ) + + @router.patch("/{agent_id}/core-memory/blocks/attach/{block_id}", 
response_model=AgentState, operation_id="attach_core_memory_block") async def attach_block_to_agent( block_id: BlockId, @@ -1486,6 +1647,10 @@ async def send_message( Process a user message and return the agent's response. This endpoint accepts a message from a user and processes it through the agent. + **Note:** Sending multiple concurrent requests to the same agent can lead to undefined behavior. + Each agent processes messages sequentially, and concurrent requests may interleave in unexpected ways. + Wait for each request to complete before sending the next one. Use separate agents or conversations for parallel processing. + The response format is controlled by the `streaming` field in the request body: - If `streaming=false` (default): Returns a complete LettaResponse with all messages - If `streaming=true`: Returns a Server-Sent Events (SSE) stream @@ -1553,23 +1718,6 @@ async def send_message( # Create a copy of agent state with the overridden llm_config agent = agent.model_copy(update={"llm_config": override_llm_config}) - agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"] - model_compatible = agent.llm_config.model_endpoint_type in [ - "anthropic", - "openai", - "together", - "google_ai", - "google_vertex", - "bedrock", - "ollama", - "azure", - "xai", - "zai", - "groq", - "deepseek", - "chatgpt_oauth", - ] - # Create a new run for execution tracking if settings.track_agent_run: runs_manager = RunManager() @@ -1592,32 +1740,34 @@ async def send_message( await redis_client.set(f"{REDIS_RUN_ID_PREFIX}:{agent_id}", run.id if run else None) run_update_metadata = None + result = None + run_status = RunStatus.failed # Default to failed, updated on success try: - result = None - if agent_eligible and model_compatible: - agent_loop = AgentLoop.load(agent_state=agent, actor=actor) - result = await agent_loop.step( - request.messages, - max_steps=request.max_steps, - run_id=run.id if run else 
None, - use_assistant_message=request.use_assistant_message, - request_start_timestamp_ns=request_start_timestamp_ns, - include_return_message_types=request.include_return_message_types, - client_tools=request.client_tools, - ) - else: - result = await server.send_message_to_agent( - agent_id=agent_id, - actor=actor, - input_messages=request.messages, - stream_steps=False, - stream_tokens=False, - # Support for AssistantMessage - use_assistant_message=request.use_assistant_message, - assistant_message_tool_name=request.assistant_message_tool_name, - assistant_message_tool_kwarg=request.assistant_message_tool_kwarg, - include_return_message_types=request.include_return_message_types, + # Handle request-level logprobs override + if request.return_logprobs or request.return_token_ids: + agent = agent.model_copy( + update={ + "llm_config": agent.llm_config.model_copy( + update={ + "return_logprobs": request.return_logprobs, + "top_logprobs": request.top_logprobs, + "return_token_ids": request.return_token_ids, + } + ) + } ) + + agent_loop = AgentLoop.load(agent_state=agent, actor=actor) + result = await agent_loop.step( + request.messages, + max_steps=request.max_steps, + run_id=run.id if run else None, + use_assistant_message=request.use_assistant_message, + request_start_timestamp_ns=request_start_timestamp_ns, + include_return_message_types=request.include_return_message_types, + client_tools=request.client_tools, + include_compaction_messages=request.include_compaction_messages, + ) run_status = result.stop_reason.stop_reason.run_status return result except PendingApprovalError as e: @@ -1675,6 +1825,10 @@ async def send_message_streaming( Deprecated: Use the `POST /{agent_id}/messages` endpoint with `streaming=true` in the request body instead. + **Note:** Sending multiple concurrent requests to the same agent can lead to undefined behavior. + Each agent processes messages sequentially, and concurrent requests may interleave in unexpected ways. 
+ Wait for each request to complete before sending the next one. Use separate agents or conversations for parallel processing. + This endpoint accepts a message from a user and processes it through the agent. It will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True. """ @@ -1686,7 +1840,7 @@ async def send_message_streaming( # use the streaming service for unified stream handling streaming_service = StreamingService(server) - run, result = await streaming_service.create_agent_stream( + _run, result = await streaming_service.create_agent_stream( agent_id=agent_id, actor=actor, request=request, @@ -1765,6 +1919,75 @@ async def cancel_message( return results +@router.post( + "/{agent_id}/generate", + operation_id="generate_completion", + responses={ + 200: {"description": "Successful generation"}, + 404: {"description": "Agent not found"}, + 422: {"description": "Invalid request parameters"}, + 502: {"description": "LLM provider error"}, + }, +) +async def generate_completion( + agent_id: AgentId, + server: SyncServer = Depends(get_letta_server), + request: GenerateRequest = Body(...), + headers: HeaderParams = Depends(get_headers), +) -> GenerateResponse: + """ + Generate a completion directly from the LLM provider using the agent's configuration. + + This endpoint makes a direct request to the LLM provider without any agent processing: + - No memory or context retrieval + - No tool calling + - No message persistence + - No agent state modification + + Simply provide a prompt, and the endpoint formats it as a user message. + Optionally include a system_prompt for context/instructions. + + The agent's LLM configuration (model, credentials, settings) is used by default. + Use override_model to switch to a different model/provider while still using + the organization's configured providers. 
+ + Example use cases: + - Quick LLM queries without agent overhead + - Testing different models with the same prompt + - Simple chat completions using agent's credentials + - Comparing model outputs on identical prompts + """ + # Get actor for permissions + actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) + + # Call the manager to generate the completion + try: + service_response = await server.agent_generate_completion_manager.generate_completion_with_agent_config_async( + agent_id=str(agent_id), + prompt=request.prompt, + system_prompt=request.system_prompt, + actor=actor, + override_model=request.override_model, + response_schema=request.response_schema, + ) + except NoResultFound: + raise HTTPException(status_code=404, detail=f"Agent with ID {agent_id} not found") + except HandleNotFoundError: + raise HTTPException(status_code=404, detail=f"Model '{request.override_model}' not found or not accessible") + except LLMError as e: + raise HTTPException(status_code=502, detail=f"LLM provider error: {str(e)}") + except Exception as e: + logger.error(f"Failed to process LLM response: {str(e)}") + raise HTTPException(status_code=502, detail=f"Failed to process LLM response: {str(e)}") + + # Convert service response to API response model + return GenerateResponse( + content=service_response.content, + model=service_response.model, + usage=service_response.usage, + ) + + @router.post("/messages/search", response_model=List[MessageSearchResult], operation_id="search_messages") async def search_messages( request: MessageSearchRequest = Body(...), @@ -1812,6 +2035,7 @@ async def _process_message_background( max_steps: int = DEFAULT_MAX_STEPS, include_return_message_types: list[MessageType] | None = None, override_model: str | None = None, + include_compaction_messages: bool = False, ) -> None: """Background task to process the message and update run status.""" request_start_timestamp_ns = get_utc_timestamp_ns() @@ -1834,46 +2058,16 @@ 
async def _process_message_background( # Create a copy of agent state with the overridden llm_config agent = agent.model_copy(update={"llm_config": override_llm_config}) - agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"] - model_compatible = agent.llm_config.model_endpoint_type in [ - "anthropic", - "openai", - "together", - "google_ai", - "google_vertex", - "bedrock", - "ollama", - "azure", - "xai", - "zai", - "groq", - "deepseek", - ] - if agent_eligible and model_compatible: - agent_loop = AgentLoop.load(agent_state=agent, actor=actor) - result = await agent_loop.step( - messages, - max_steps=max_steps, - run_id=run_id, - use_assistant_message=use_assistant_message, - request_start_timestamp_ns=request_start_timestamp_ns, - include_return_message_types=include_return_message_types, - ) - else: - result = await server.send_message_to_agent( - agent_id=agent_id, - actor=actor, - input_messages=messages, - stream_steps=False, - stream_tokens=False, - metadata={"run_id": run_id}, - # Support for AssistantMessage - use_assistant_message=use_assistant_message, - assistant_message_tool_name=assistant_message_tool_name, - assistant_message_tool_kwarg=assistant_message_tool_kwarg, - include_return_message_types=include_return_message_types, - ) - + agent_loop = AgentLoop.load(agent_state=agent, actor=actor) + result = await agent_loop.step( + messages, + max_steps=max_steps, + run_id=run_id, + use_assistant_message=use_assistant_message, + request_start_timestamp_ns=request_start_timestamp_ns, + include_return_message_types=include_return_message_types, + include_compaction_messages=include_compaction_messages, + ) runs_manager = RunManager() from letta.schemas.enums import RunStatus from letta.schemas.letta_stop_reason import StopReasonType @@ -1972,13 +2166,17 @@ async def send_message_async( The actual processing happens in the background, and the status can be checked using the run ID. 
This is "asynchronous" in the sense that it's a background run and explicitly must be fetched by the run ID. + + **Note:** Sending multiple concurrent requests to the same agent can lead to undefined behavior. + Each agent processes messages sequentially, and concurrent requests may interleave in unexpected ways. + Wait for each request to complete before sending the next one. Use separate agents or conversations for parallel processing. """ MetricRegistry().user_message_counter.add(1, get_ctx_attributes()) actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) try: is_message_input = request.messages[0].type == MessageCreateType.message - except: + except Exception: is_message_input = True use_lettuce = headers.experimental_params.message_async and is_message_input @@ -2036,6 +2234,7 @@ async def send_message_async( max_steps=request.max_steps, include_return_message_types=request.include_return_message_types, override_model=request.override_model, + include_compaction_messages=request.include_compaction_messages, ), label=f"process_message_background_{run.id}", ) @@ -2097,6 +2296,7 @@ async def reset_messages( actor=actor, add_default_initial_messages=request.add_default_initial_messages, needs_agent_state=not is_1_0_sdk_version(headers), + rebuild_system_prompt=True, ) @@ -2154,33 +2354,10 @@ async def preview_model_request( agent = await server.agent_manager.get_agent_by_id_async( agent_id, actor, include_relationships=["multi_agent_group", "memory", "sources"] ) - agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"] - model_compatible = agent.llm_config.model_endpoint_type in [ - "anthropic", - "openai", - "together", - "google_ai", - "google_vertex", - "bedrock", - "ollama", - "azure", - "xai", - "zai", - "groq", - "deepseek", - "chatgpt_oauth", - ] - - if agent_eligible and model_compatible: - agent_loop = AgentLoop.load(agent_state=agent, actor=actor) - 
return await agent_loop.build_request( - input_messages=request.messages, - ) - else: - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="Payload inspection is not currently supported for this agent configuration.", - ) + agent_loop = AgentLoop.load(agent_state=agent, actor=actor) + return await agent_loop.build_request( + input_messages=request.messages, + ) class CompactionRequest(BaseModel): @@ -2208,53 +2385,68 @@ async def summarize_messages( """ actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) - agent = await server.agent_manager.get_agent_by_id_async(agent_id, actor, include_relationships=["multi_agent_group"]) - agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"] - model_compatible = agent.llm_config.model_endpoint_type in [ - "anthropic", - "openai", - "together", - "google_ai", - "google_vertex", - "bedrock", - "ollama", - "azure", - "xai", - "zai", - "groq", - "deepseek", - "chatgpt_oauth", - ] + agent = await server.agent_manager.get_agent_by_id_async(agent_id, actor, include_relationships=["multi_agent_group", "tools"]) - if agent_eligible and model_compatible: - agent_loop = LettaAgentV3(agent_state=agent, actor=actor) - in_context_messages = await server.message_manager.get_messages_by_ids_async(message_ids=agent.message_ids, actor=actor) - compaction_settings = request.compaction_settings if request else None - num_messages_before = len(in_context_messages) - summary_message, messages, summary = await agent_loop.compact( - messages=in_context_messages, - compaction_settings=compaction_settings, - ) - num_messages_after = len(messages) + agent_loop = LettaAgentV3(agent_state=agent, actor=actor) + in_context_messages = await server.message_manager.get_messages_by_ids_async(message_ids=agent.message_ids, actor=actor) - # update the agent state - logger.info(f"Summarized {num_messages_before} messages to 
{num_messages_after}") - if num_messages_before <= num_messages_after: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="Summarization failed to reduce the number of messages. You may need to use a different CompactionSettings (e.g. using `all` mode).", - ) - await agent_loop._checkpoint_messages(run_id=None, step_id=None, new_messages=[summary_message], in_context_messages=messages) + # Early return if there's nothing to compact (only system message, or system + summary) + non_system_summary_messages = [m for m in in_context_messages if m.role not in (MessageRole.system, MessageRole.summary)] + if not non_system_summary_messages: + existing_summary = None + for m in in_context_messages: + if m.role == MessageRole.summary and m.content: + try: + summary_json = json.loads(m.content[0].text) + existing_summary = summary_json.get("message") + except (json.JSONDecodeError, IndexError, AttributeError): + existing_summary = m.content[0].text if m.content else None + break return CompactionResponse( - summary=summary, - num_messages_before=num_messages_before, - num_messages_after=num_messages_after, + summary=existing_summary, + num_messages_before=len(in_context_messages), + num_messages_after=len(in_context_messages), ) + + # Merge request compaction_settings with agent's settings (request overrides agent) + if agent.compaction_settings and request and request.compaction_settings: + # Start with agent's settings, override with new values from request + # Use model_fields_set to get the fields that were changed in the request (want to ignore the defaults that get set automatically) + compaction_settings = agent.compaction_settings.copy() # do not mutate original agent compaction settings + changed_fields = request.compaction_settings.model_fields_set + for field in changed_fields: + setattr(compaction_settings, field, getattr(request.compaction_settings, field)) + + # If mode changed from agent's original settings and prompt not explicitly set 
in request, then use the default prompt for the new mode + # Ex: previously was sliding_window, now is all, so we need to use the default prompt for all mode + if "mode" in changed_fields and agent.compaction_settings.mode != request.compaction_settings.mode: + from letta.services.summarizer.summarizer_config import get_default_prompt_for_mode + + compaction_settings.prompt = get_default_prompt_for_mode(compaction_settings.mode) else: + compaction_settings = (request and request.compaction_settings) or agent.compaction_settings + num_messages_before = len(in_context_messages) + summary_message, messages, summary = await agent_loop.compact( + messages=in_context_messages, + compaction_settings=compaction_settings, + use_summary_role=True, + ) + num_messages_after = len(messages) + + # update the agent state + logger.info(f"Summarized {num_messages_before} messages to {num_messages_after}") + if num_messages_before <= num_messages_after: + logger.warning(f"Summarization failed to reduce the number of messages. {num_messages_before} messages -> {num_messages_after}.") raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="Summarization is not currently supported for this agent configuration. Please contact Letta support.", + status_code=status.HTTP_400_BAD_REQUEST, + detail="Summarization failed to reduce the number of messages. You may need to use a different CompactionSettings (e.g. 
using `all` mode).", ) + await agent_loop._checkpoint_messages(run_id=None, step_id=None, new_messages=[summary_message], in_context_messages=messages) + return CompactionResponse( + summary=summary, + num_messages_before=num_messages_before, + num_messages_after=num_messages_after, + ) class CaptureMessagesRequest(BaseModel): diff --git a/letta/server/rest_api/routers/v1/anthropic.py b/letta/server/rest_api/routers/v1/anthropic.py index 4aa271ba..34f7a662 100644 --- a/letta/server/rest_api/routers/v1/anthropic.py +++ b/letta/server/rest_api/routers/v1/anthropic.py @@ -1,5 +1,4 @@ import asyncio -import json import httpx from fastapi import APIRouter, Depends, Request @@ -22,6 +21,8 @@ from letta.server.server import SyncServer logger = get_logger(__name__) +_background_tasks: set[asyncio.Task] = set() + router = APIRouter(prefix="/anthropic", tags=["anthropic"]) ANTHROPIC_API_BASE = "https://api.anthropic.com" @@ -173,7 +174,7 @@ async def anthropic_messages_proxy( # This prevents race conditions where multiple requests persist the same message user_messages_to_persist = await check_for_duplicate_message(server, agent, actor, user_messages, PROXY_NAME) - asyncio.create_task( + task = asyncio.create_task( persist_messages_background( server=server, agent=agent, @@ -184,6 +185,8 @@ async def anthropic_messages_proxy( proxy_name=PROXY_NAME, ) ) + _background_tasks.add(task) + task.add_done_callback(_background_tasks.discard) return StreamingResponse( stream_response(), @@ -227,7 +230,7 @@ async def anthropic_messages_proxy( # Check for duplicate user messages before creating background task user_messages_to_persist = await check_for_duplicate_message(server, agent, actor, user_messages, PROXY_NAME) - asyncio.create_task( + task = asyncio.create_task( persist_messages_background( server=server, agent=agent, @@ -238,6 +241,8 @@ async def anthropic_messages_proxy( proxy_name=PROXY_NAME, ) ) + _background_tasks.add(task) + 
task.add_done_callback(_background_tasks.discard) except Exception as e: logger.warning(f"[{PROXY_NAME}] Failed to extract assistant response for logging: {e}") diff --git a/letta/server/rest_api/routers/v1/archives.py b/letta/server/rest_api/routers/v1/archives.py index 1313bf8d..9076bcc9 100644 --- a/letta/server/rest_api/routers/v1/archives.py +++ b/letta/server/rest_api/routers/v1/archives.py @@ -1,19 +1,17 @@ -from datetime import datetime from typing import Dict, List, Literal, Optional from fastapi import APIRouter, Body, Depends, Query from pydantic import BaseModel, Field from letta import AgentState -from letta.errors import LettaInvalidArgumentError from letta.schemas.agent import AgentRelationships -from letta.schemas.archive import Archive as PydanticArchive, ArchiveBase +from letta.schemas.archive import Archive as PydanticArchive from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.passage import Passage from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.server.server import SyncServer from letta.settings import settings -from letta.validators import AgentId, ArchiveId, PassageId +from letta.validators import ArchiveId, PassageId router = APIRouter(prefix="/archives", tags=["archives"]) diff --git a/letta/server/rest_api/routers/v1/blocks.py b/letta/server/rest_api/routers/v1/blocks.py index d297ab7a..a4c5d539 100644 --- a/letta/server/rest_api/routers/v1/blocks.py +++ b/letta/server/rest_api/routers/v1/blocks.py @@ -1,10 +1,10 @@ from typing import TYPE_CHECKING, List, Literal, Optional -from fastapi import APIRouter, Body, Depends, HTTPException, Query +from fastapi import APIRouter, Body, Depends, Query from letta.orm.errors import NoResultFound from letta.schemas.agent import AgentRelationships, AgentState -from letta.schemas.block import BaseBlock, Block, BlockResponse, BlockUpdate, CreateBlock +from letta.schemas.block import Block, BlockResponse, BlockUpdate, 
CreateBlock from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.server.server import SyncServer from letta.utils import is_1_0_sdk_version diff --git a/letta/server/rest_api/routers/v1/conversations.py b/letta/server/rest_api/routers/v1/conversations.py index 88dff474..141fba52 100644 --- a/letta/server/rest_api/routers/v1/conversations.py +++ b/letta/server/rest_api/routers/v1/conversations.py @@ -17,14 +17,13 @@ from letta.schemas.enums import RunStatus from letta.schemas.job import LettaRequestConfig from letta.schemas.letta_message import LettaMessageUnion from letta.schemas.letta_request import ConversationMessageRequest, LettaStreamingRequest, RetrieveStreamRequest -from letta.schemas.letta_response import LettaResponse, LettaStreamingResponse +from letta.schemas.letta_response import LettaResponse from letta.schemas.run import Run as PydanticRun from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.server.rest_api.redis_stream_manager import redis_sse_stream_generator from letta.server.rest_api.streaming_response import ( StreamingResponseWithStatusCode, add_keepalive_to_stream, - cancellation_aware_stream_wrapper, ) from letta.server.server import SyncServer from letta.services.conversation_manager import ConversationManager @@ -61,21 +60,30 @@ async def create_conversation( @router.get("/", response_model=List[Conversation], operation_id="list_conversations") async def list_conversations( - agent_id: str = Query(..., description="The agent ID to list conversations for"), + agent_id: Optional[str] = Query( + None, description="The agent ID to list conversations for (optional - returns all conversations if not provided)" + ), limit: int = Query(50, description="Maximum number of conversations to return"), after: Optional[str] = Query(None, description="Cursor for pagination (conversation ID)"), summary_search: Optional[str] = Query(None, description="Search for 
text within conversation summaries"), + order: Literal["asc", "desc"] = Query( + "desc", description="Sort order for conversations. 'asc' for oldest first, 'desc' for newest first" + ), + order_by: Literal["created_at", "last_run_completion"] = Query("created_at", description="Field to sort by"), server: SyncServer = Depends(get_letta_server), headers: HeaderParams = Depends(get_headers), ): - """List all conversations for an agent.""" + """List all conversations for an agent (or all conversations if agent_id not provided).""" actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) + ascending = order == "asc" return await conversation_manager.list_conversations( agent_id=agent_id, actor=actor, limit=limit, after=after, summary_search=summary_search, + ascending=ascending, + sort_by=order_by, ) @@ -109,6 +117,26 @@ async def update_conversation( ) +@router.delete("/{conversation_id}", response_model=None, operation_id="delete_conversation") +async def delete_conversation( + conversation_id: ConversationId, + server: SyncServer = Depends(get_letta_server), + headers: HeaderParams = Depends(get_headers), +): + """ + Delete a conversation (soft delete). + + This marks the conversation as deleted but does not permanently remove it from the database. + The conversation will no longer appear in list operations. + Any isolated blocks associated with the conversation will be permanently deleted. 
+ """ + actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) + await conversation_manager.delete_conversation( + conversation_id=conversation_id, + actor=actor, + ) + + ConversationMessagesResponse = Annotated[ List[LettaMessageUnion], Field(json_schema_extra={"type": "array", "items": {"$ref": "#/components/schemas/LettaMessageUnion"}}) ] @@ -229,6 +257,17 @@ async def send_conversation_message( include_relationships=["memory", "multi_agent_group", "sources", "tool_exec_environment_variables", "tools", "tags"], ) + # Apply conversation-level model override if set (lower priority than request override) + if conversation.model and not request.override_model: + conversation_llm_config = await server.get_llm_config_from_handle_async( + actor=actor, + handle=conversation.model, + ) + if conversation.model_settings is not None: + update_params = conversation.model_settings._to_legacy_config_params() + conversation_llm_config = conversation_llm_config.model_copy(update=update_params) + agent = agent.model_copy(update={"llm_config": conversation_llm_config}) + if request.override_model: override_llm_config = await server.get_llm_config_from_handle_async( actor=actor, @@ -265,6 +304,7 @@ async def send_conversation_message( include_return_message_types=request.include_return_message_types, client_tools=request.client_tools, conversation_id=conversation_id, + include_compaction_messages=request.include_compaction_messages, ) @@ -469,29 +509,6 @@ async def compact_conversation( # Get the agent state agent = await server.agent_manager.get_agent_by_id_async(conversation.agent_id, actor, include_relationships=["multi_agent_group"]) - # Check eligibility - agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"] - model_compatible = agent.llm_config.model_endpoint_type in [ - "anthropic", - "openai", - "together", - "google_ai", - "google_vertex", - "bedrock", - "ollama", - 
"azure", - "xai", - "zai", - "groq", - "deepseek", - ] - - if not (agent_eligible and model_compatible): - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="Summarization is not currently supported for this agent configuration. Please contact Letta support.", - ) - # Get in-context messages for this conversation in_context_messages = await conversation_manager.get_messages_for_conversation( conversation_id=conversation_id, @@ -514,15 +531,19 @@ async def compact_conversation( summary_message, messages, summary = await agent_loop.compact( messages=in_context_messages, compaction_settings=compaction_settings, + use_summary_role=True, ) num_messages_after = len(messages) # Validate compaction reduced messages if num_messages_before <= num_messages_after: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="Summarization failed to reduce the number of messages. You may need to use a different CompactionSettings (e.g. using `all` mode).", + logger.warning( + f"Summarization failed to reduce the number of messages. {num_messages_before} messages -> {num_messages_after} (only expected if drop_tool_returns is True)." ) + # raise HTTPException( + # status_code=status.HTTP_400_BAD_REQUEST, + # detail="Summarization failed to reduce the number of messages. You may need to use a different CompactionSettings (e.g. 
using `all` mode).", + # ) # Checkpoint the messages (this will update the conversation_messages table) await agent_loop._checkpoint_messages(run_id=None, step_id=None, new_messages=[summary_message], in_context_messages=messages) diff --git a/letta/server/rest_api/routers/v1/folders.py b/letta/server/rest_api/routers/v1/folders.py index 908004ac..67505306 100644 --- a/letta/server/rest_api/routers/v1/folders.py +++ b/letta/server/rest_api/routers/v1/folders.py @@ -22,10 +22,10 @@ from letta.otel.tracing import trace_method from letta.schemas.agent import AgentState from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import DuplicateFileHandling, FileProcessingStatus -from letta.schemas.file import FileMetadata, FileMetadataBase -from letta.schemas.folder import BaseFolder, Folder +from letta.schemas.file import FileMetadata +from letta.schemas.folder import Folder from letta.schemas.passage import Passage -from letta.schemas.source import BaseSource, Source, SourceCreate, SourceUpdate +from letta.schemas.source import Source, SourceCreate, SourceUpdate from letta.schemas.source_metadata import OrganizationSourcesStats from letta.schemas.user import User from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server @@ -331,7 +331,7 @@ async def upload_file_to_folder( return response elif duplicate_handling == DuplicateFileHandling.REPLACE: # delete the file - deleted_file = await server.file_manager.delete_file(file_id=existing_file.id, actor=actor) + await server.file_manager.delete_file(file_id=existing_file.id, actor=actor) unique_filename = original_filename if not unique_filename: diff --git a/letta/server/rest_api/routers/v1/git_http.py b/letta/server/rest_api/routers/v1/git_http.py new file mode 100644 index 00000000..f7ab7b47 --- /dev/null +++ b/letta/server/rest_api/routers/v1/git_http.py @@ -0,0 +1,324 @@ +"""Git HTTP Smart Protocol endpoints (proxied to memfs service). 
+ +This module proxies `/v1/git/*` requests to the external memfs service, which +handles git smart HTTP protocol (clone, push, pull). + +Example: + + git clone http://localhost:8283/v1/git/{agent_id}/state.git + +Routes (smart HTTP): + GET /v1/git/{agent_id}/state.git/info/refs?service=git-upload-pack + POST /v1/git/{agent_id}/state.git/git-upload-pack + GET /v1/git/{agent_id}/state.git/info/refs?service=git-receive-pack + POST /v1/git/{agent_id}/state.git/git-receive-pack + +Post-push sync to PostgreSQL is triggered from the proxy route after a +successful `git-receive-pack`. +""" + +from __future__ import annotations + +import asyncio +from typing import Dict, Iterable, Optional + +import httpx +from fastapi import APIRouter, Depends, Request +from fastapi.responses import JSONResponse, StreamingResponse +from starlette.background import BackgroundTask + +from letta.log import get_logger +from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server + +logger = get_logger(__name__) + +_background_tasks: set[asyncio.Task] = set() + +router = APIRouter(prefix="/git", tags=["git"], include_in_schema=False) + +# Global storage for the server instance (set during app startup) +_server_instance = None + + +def set_server_instance(server) -> None: + """Set the Letta server instance for git operations. Called during app startup.""" + + global _server_instance + _server_instance = server + + +async def _sync_after_push(actor_id: str, agent_id: str) -> None: + """Sync blocks to PostgreSQL after a successful push. + + GCS sync is handled by the memfs service. This function syncs the + block contents to PostgreSQL for caching/querying. 
+ """ + if _server_instance is None: + logger.warning("Server instance not set; cannot sync after push") + return + + try: + actor = await _server_instance.user_manager.get_actor_by_id_async(actor_id) + except Exception: + logger.exception("Failed to resolve actor for post-push sync (actor_id=%s)", actor_id) + return + + org_id = actor.organization_id + + # Sync blocks to Postgres (if using GitEnabledBlockManager). + # + # Keep the same pattern as API-driven edits: read from the source of truth + # in object storage after persisting the pushed refs/objects, rather than + # relying on a working tree checkout under repo_path/. + from letta.services.block_manager_git import GitEnabledBlockManager + + if not isinstance(_server_instance.block_manager, GitEnabledBlockManager): + return + + # Retry with backoff to handle race condition where GCS upload is still in progress + # after git-receive-pack returns. The webhook fires immediately but commit objects + # may not be fully uploaded yet. + files = {} + max_retries = 3 + for attempt in range(max_retries): + try: + files = await _server_instance.memory_repo_manager.git.get_files( + agent_id=agent_id, + org_id=org_id, + ref="HEAD", + ) + logger.info("get_files returned %d files (attempt %d)", len(files), attempt + 1) + break + except Exception as e: + if attempt < max_retries - 1: + wait_time = 2**attempt # 1s, 2s, 4s + logger.warning("Failed to read repo files (attempt %d/%d), retrying in %ds: %s", attempt + 1, max_retries, wait_time, e) + await asyncio.sleep(wait_time) + else: + logger.exception("Failed to read repo files after %d retries (agent=%s)", max_retries, agent_id) + + expected_labels = set() + from letta.services.memory_repo.block_markdown import parse_block_markdown + + md_file_paths = sorted([file_path for file_path in files if file_path.endswith(".md")]) + nested_md_file_paths = [file_path for file_path in md_file_paths if "/" in file_path[:-3]] + logger.info( + "Post-push sync file scan: agent=%s 
total_files=%d md_files=%d nested_md_files=%d sample_md_paths=%s", + agent_id, + len(files), + len(md_file_paths), + len(nested_md_file_paths), + md_file_paths[:10], + ) + + synced = 0 + for file_path, content in files.items(): + if not file_path.endswith(".md"): + continue + + label = file_path[:-3] + expected_labels.add(label) + + # Parse frontmatter to extract metadata alongside value + parsed = parse_block_markdown(content) + + try: + await _server_instance.block_manager._sync_block_to_postgres( + agent_id=agent_id, + label=label, + value=parsed["value"], + actor=actor, + description=parsed.get("description"), + limit=parsed.get("limit"), + read_only=parsed.get("read_only"), + metadata=parsed.get("metadata"), + ) + synced += 1 + logger.info("Synced block %s to PostgreSQL", label) + except Exception: + logger.exception( + "Failed to sync block %s to PostgreSQL (agent=%s) [path=%s nested=%s]", + label, + agent_id, + file_path, + "/" in label, + ) + + if synced == 0: + logger.warning("No *.md files found in repo HEAD during post-push sync (agent=%s)", agent_id) + else: + # Detach blocks that were removed in git. + # + # We treat git as the source of truth for which blocks are attached to + # this agent. If a *.md file disappears from HEAD, detach the + # corresponding block from the agent in Postgres. 
+ try: + existing_blocks = await _server_instance.agent_manager.list_agent_blocks_async( + agent_id=agent_id, + actor=actor, + before=None, + after=None, + limit=1000, + ascending=True, + ) + existing_by_label = {b.label: b for b in existing_blocks} + removed_labels = set(existing_by_label.keys()) - expected_labels + + for label in sorted(removed_labels): + block = existing_by_label.get(label) + if not block: + continue + await _server_instance.agent_manager.detach_block_async( + agent_id=agent_id, + block_id=block.id, + actor=actor, + ) + logger.info("Detached block %s from agent (removed from git)", label) + except Exception: + logger.exception("Failed detaching removed blocks during post-push sync (agent=%s)", agent_id) + + +def _parse_agent_id_from_repo_path(path: str) -> Optional[str]: + """Extract agent_id from a git HTTP path. + + Expected path form: + - {agent_id}/state.git/... + """ + + parts = path.strip("/").split("/") + if len(parts) < 2: + return None + + if parts[1] != "state.git": + return None + + return parts[0] + + +def _filter_out_hop_by_hop_headers(headers: Iterable[tuple[str, str]]) -> Dict[str, str]: + # RFC 7230 hop-by-hop headers that should not be forwarded + hop_by_hop = { + "connection", + "keep-alive", + "proxy-authenticate", + "proxy-authorization", + "te", + "trailers", + "transfer-encoding", + "upgrade", + } + + out: Dict[str, str] = {} + for k, v in headers: + lk = k.lower() + if lk in hop_by_hop: + continue + out[k] = v + return out + + +def _get_memfs_service_url() -> Optional[str]: + """Get the memfs service URL from settings, if configured.""" + from letta.settings import settings + + return settings.memfs_service_url + + +@router.api_route("/{path:path}", methods=["GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"]) # pragma: no cover +async def proxy_git_http( + path: str, + request: Request, + server=Depends(get_letta_server), + headers: HeaderParams = Depends(get_headers), +): + """Proxy `/v1/git/*` requests to the 
memfs service. + + Requires LETTA_MEMFS_SERVICE_URL to be configured. + """ + + memfs_url = _get_memfs_service_url() + + if not memfs_url: + return JSONResponse( + status_code=501, + content={ + "detail": "git HTTP requires memfs service (LETTA_MEMFS_SERVICE_URL not configured)", + }, + ) + + # Proxy to external memfs service + url = f"{memfs_url.rstrip('/')}/git/{path}" + logger.info("proxy_git_http: using memfs service at %s", memfs_url) + + req_headers = _filter_out_hop_by_hop_headers(request.headers.items()) + # Avoid sending FastAPI host/length; httpx will compute + req_headers.pop("host", None) + req_headers.pop("content-length", None) + + # Resolve org_id from the authenticated actor + agent and forward to memfs. + agent_id = _parse_agent_id_from_repo_path(path) + if agent_id is not None: + actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) + # Authorization check: ensure the actor can access this agent. + await server.agent_manager.get_agent_by_id_async(agent_id=agent_id, actor=actor, include_relationships=[]) + + # Ensure we set exactly one X-Organization-Id header (avoid duplicate casing). + for k in list(req_headers.keys()): + if k.lower() == "x-organization-id": + req_headers.pop(k, None) + # Use the authenticated actor's org; AgentState may not carry an organization field. 
+ req_headers["X-Organization-Id"] = actor.organization_id + + logger.info( + "proxy_git_http: method=%s path=%s parsed_agent_id=%s actor_id=%s has_user_id_hdr=%s x_org_hdr=%s", + request.method, + path, + agent_id, + headers.actor_id, + bool(request.headers.get("user_id")), + req_headers.get("X-Organization-Id") or req_headers.get("x-organization-id"), + ) + + async def _body_iter(): + async for chunk in request.stream(): + yield chunk + + client = httpx.AsyncClient(timeout=None) + req = client.build_request( + method=request.method, + url=url, + params=request.query_params, + headers=req_headers, + content=_body_iter() if request.method not in {"GET", "HEAD"} else None, + ) + upstream = await client.send(req, stream=True) + + resp_headers = _filter_out_hop_by_hop_headers(upstream.headers.items()) + + # If this was a push, trigger our sync. + if request.method == "POST" and path.endswith("git-receive-pack") and upstream.status_code < 400: + agent_id = _parse_agent_id_from_repo_path(path) + if agent_id is not None: + try: + actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) + # Authorization check: ensure the actor can access this agent. 
+ await server.agent_manager.get_agent_by_id_async(agent_id=agent_id, actor=actor, include_relationships=[]) + task = asyncio.create_task(_sync_after_push(actor.id, agent_id)) + _background_tasks.add(task) + task.add_done_callback(_background_tasks.discard) + except Exception: + logger.exception("Failed to trigger post-push sync (agent_id=%s)", agent_id) + + async def _aclose_upstream_and_client() -> None: + try: + await upstream.aclose() + finally: + await client.aclose() + + return StreamingResponse( + upstream.aiter_raw(), + status_code=upstream.status_code, + headers=resp_headers, + media_type=upstream.headers.get("content-type"), + background=BackgroundTask(_aclose_upstream_and_client), + ) diff --git a/letta/server/rest_api/routers/v1/groups.py b/letta/server/rest_api/routers/v1/groups.py index 49553224..29edd0b6 100644 --- a/letta/server/rest_api/routers/v1/groups.py +++ b/letta/server/rest_api/routers/v1/groups.py @@ -5,11 +5,8 @@ from fastapi.responses import JSONResponse from pydantic import Field from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG -from letta.schemas.group import Group, GroupBase, GroupCreate, GroupUpdate, ManagerType +from letta.schemas.group import Group, GroupCreate, GroupUpdate, ManagerType from letta.schemas.letta_message import LettaMessageUnion, LettaMessageUpdateUnion -from letta.schemas.letta_request import LettaRequest, LettaStreamingRequest -from letta.schemas.letta_response import LettaResponse -from letta.schemas.message import BaseMessage from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.server.server import SyncServer from letta.validators import GroupId, MessageId @@ -128,77 +125,6 @@ async def delete_group( return JSONResponse(status_code=status.HTTP_200_OK, content={"message": f"Group id={group_id} successfully deleted"}) -@router.post( - "/{group_id}/messages", - response_model=LettaResponse, - operation_id="send_group_message", - 
deprecated=True, -) -async def send_group_message( - group_id: GroupId, - server: SyncServer = Depends(get_letta_server), - request: LettaRequest = Body(...), - headers: HeaderParams = Depends(get_headers), -): - """ - Process a user message and return the group's response. - This endpoint accepts a message from a user and processes it through through agents in the group based on the specified pattern - """ - actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) - result = await server.send_group_message_to_agent( - group_id=group_id, - actor=actor, - input_messages=request.messages, - stream_steps=False, - stream_tokens=False, - # Support for AssistantMessage - use_assistant_message=request.use_assistant_message, - assistant_message_tool_name=request.assistant_message_tool_name, - assistant_message_tool_kwarg=request.assistant_message_tool_kwarg, - ) - return result - - -@router.post( - "/{group_id}/messages/stream", - response_model=None, - operation_id="send_group_message_streaming", - deprecated=True, - responses={ - 200: { - "description": "Successful response", - "content": { - "text/event-stream": {"description": "Server-Sent Events stream"}, - }, - } - }, -) -async def send_group_message_streaming( - group_id: GroupId, - server: SyncServer = Depends(get_letta_server), - request: LettaStreamingRequest = Body(...), - headers: HeaderParams = Depends(get_headers), -): - """ - Process a user message and return the group's responses. - This endpoint accepts a message from a user and processes it through agents in the group based on the specified pattern. - It will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True. 
- """ - actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) - result = await server.send_group_message_to_agent( - group_id=group_id, - actor=actor, - input_messages=request.messages, - stream_steps=True, - stream_tokens=request.stream_tokens, - # Support for AssistantMessage - use_assistant_message=request.use_assistant_message, - assistant_message_tool_name=request.assistant_message_tool_name, - assistant_message_tool_kwarg=request.assistant_message_tool_kwarg, - ) - return result - - GroupMessagesResponse = Annotated[ List[LettaMessageUnion], Field(json_schema_extra={"type": "array", "items": {"$ref": "#/components/schemas/LettaMessageUnion"}}) ] diff --git a/letta/server/rest_api/routers/v1/identities.py b/letta/server/rest_api/routers/v1/identities.py index 2fca5a7a..85ec3ef3 100644 --- a/letta/server/rest_api/routers/v1/identities.py +++ b/letta/server/rest_api/routers/v1/identities.py @@ -1,10 +1,10 @@ -from typing import TYPE_CHECKING, List, Literal, Optional, Union +from typing import TYPE_CHECKING, List, Literal, Optional from fastapi import APIRouter, Body, Depends, Header, Query -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError +from letta.orm.errors import NoResultFound from letta.schemas.agent import AgentRelationships, AgentState -from letta.schemas.block import Block, BlockResponse +from letta.schemas.block import BlockResponse from letta.schemas.identity import ( Identity, IdentityCreate, @@ -53,7 +53,7 @@ async def list_identities( """ actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) - identities, next_cursor, has_more = await server.identity_manager.list_identities_async( + identities, _next_cursor, _has_more = await server.identity_manager.list_identities_async( name=name, project_id=project_id, identifier_key=identifier_key, diff --git a/letta/server/rest_api/routers/v1/internal_runs.py b/letta/server/rest_api/routers/v1/internal_runs.py index 
75c2efb6..d9cba0b9 100644 --- a/letta/server/rest_api/routers/v1/internal_runs.py +++ b/letta/server/rest_api/routers/v1/internal_runs.py @@ -8,7 +8,6 @@ from letta.schemas.letta_stop_reason import StopReasonType from letta.schemas.run import Run from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.server.server import SyncServer -from letta.services.run_manager import RunManager router = APIRouter(prefix="/_internal_runs", tags=["_internal_runs"]) diff --git a/letta/server/rest_api/routers/v1/jobs.py b/letta/server/rest_api/routers/v1/jobs.py index be8b57e9..c6603458 100644 --- a/letta/server/rest_api/routers/v1/jobs.py +++ b/letta/server/rest_api/routers/v1/jobs.py @@ -4,7 +4,7 @@ from fastapi import APIRouter, Depends, Query from letta.errors import LettaInvalidArgumentError from letta.schemas.enums import JobStatus -from letta.schemas.job import Job, JobBase +from letta.schemas.job import Job from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.server.server import SyncServer from letta.settings import settings diff --git a/letta/server/rest_api/routers/v1/mcp_servers.py b/letta/server/rest_api/routers/v1/mcp_servers.py index 8037966e..537660de 100644 --- a/letta/server/rest_api/routers/v1/mcp_servers.py +++ b/letta/server/rest_api/routers/v1/mcp_servers.py @@ -1,12 +1,12 @@ -from typing import Any, AsyncGenerator, Dict, List, Optional, Union +from typing import AsyncGenerator, List, Optional, Union -from fastapi import APIRouter, Body, Depends, HTTPException, Request +from fastapi import APIRouter, Body, Depends, Request from httpx import HTTPStatusError from starlette.responses import StreamingResponse +from letta.errors import LettaMCPConnectionError from letta.functions.mcp_client.types import SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig from letta.log import get_logger -from letta.schemas.letta_message import ToolReturnMessage from 
letta.schemas.mcp_server import ( CreateMCPServerRequest, MCPServerUnion, @@ -27,7 +27,6 @@ from letta.server.server import SyncServer from letta.services.mcp.oauth_utils import drill_down_exception, oauth_stream_event from letta.services.mcp.stdio_client import AsyncStdioMCPClient from letta.services.mcp.types import OauthStreamEvent -from letta.settings import tool_settings router = APIRouter(prefix="/mcp-servers", tags=["mcp-servers"]) @@ -268,8 +267,7 @@ async def connect_mcp_server( tools = await client.list_tools(serialize=True) yield oauth_stream_event(OauthStreamEvent.SUCCESS, tools=tools) return - except ConnectionError: - # TODO: jnjpng make this connection error check more specific to the 401 unauthorized error + except (ConnectionError, LettaMCPConnectionError): if isinstance(client, AsyncStdioMCPClient): logger.warning("OAuth not supported for stdio") yield oauth_stream_event(OauthStreamEvent.ERROR, message="OAuth not supported for stdio") diff --git a/letta/server/rest_api/routers/v1/messages.py b/letta/server/rest_api/routers/v1/messages.py index e695d292..7dfcda59 100644 --- a/letta/server/rest_api/routers/v1/messages.py +++ b/letta/server/rest_api/routers/v1/messages.py @@ -11,7 +11,7 @@ from letta.schemas.job import BatchJob, JobStatus, JobType, JobUpdate from letta.schemas.letta_message import LettaMessageSearchResult, LettaMessageUnion from letta.schemas.letta_request import CreateBatch from letta.schemas.letta_response import LettaBatchMessages -from letta.schemas.message import Message, MessageSearchRequest, MessageSearchResult, SearchAllMessagesRequest +from letta.schemas.message import Message, SearchAllMessagesRequest from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.server.server import SyncServer from letta.settings import settings diff --git a/letta/server/rest_api/routers/v1/providers.py b/letta/server/rest_api/routers/v1/providers.py index 5c0ae926..d20512a3 100644 --- 
a/letta/server/rest_api/routers/v1/providers.py +++ b/letta/server/rest_api/routers/v1/providers.py @@ -4,7 +4,7 @@ from fastapi import APIRouter, Body, Depends, HTTPException, Query, status from fastapi.responses import JSONResponse from letta.schemas.enums import ProviderCategory, ProviderType -from letta.schemas.providers import Provider, ProviderBase, ProviderCheck, ProviderCreate, ProviderUpdate +from letta.schemas.providers import Provider, ProviderCheck, ProviderCreate, ProviderUpdate from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.validators import ProviderId diff --git a/letta/server/rest_api/routers/v1/runs.py b/letta/server/rest_api/routers/v1/runs.py index b4c3973d..c454f7d9 100644 --- a/letta/server/rest_api/routers/v1/runs.py +++ b/letta/server/rest_api/routers/v1/runs.py @@ -20,7 +20,6 @@ from letta.server.rest_api.redis_stream_manager import redis_sse_stream_generato from letta.server.rest_api.streaming_response import ( StreamingResponseWithStatusCode, add_keepalive_to_stream, - cancellation_aware_stream_wrapper, ) from letta.server.server import SyncServer from letta.services.clickhouse_otel_traces import ClickhouseOtelTracesReader diff --git a/letta/server/rest_api/routers/v1/sandbox_configs.py b/letta/server/rest_api/routers/v1/sandbox_configs.py index 5e51fa33..d59181c0 100644 --- a/letta/server/rest_api/routers/v1/sandbox_configs.py +++ b/letta/server/rest_api/routers/v1/sandbox_configs.py @@ -15,7 +15,6 @@ from letta.schemas.environment_variables import ( from letta.schemas.sandbox_config import ( LocalSandboxConfig, SandboxConfig as PydanticSandboxConfig, - SandboxConfigBase, SandboxConfigCreate, SandboxConfigUpdate, ) diff --git a/letta/server/rest_api/routers/v1/sources.py b/letta/server/rest_api/routers/v1/sources.py index d5a38a9c..39f41d52 100644 --- a/letta/server/rest_api/routers/v1/sources.py +++ b/letta/server/rest_api/routers/v1/sources.py @@ -5,8 +5,7 @@ import tempfile from 
pathlib import Path from typing import List, Optional -from fastapi import APIRouter, Depends, HTTPException, Query, UploadFile -from starlette import status +from fastapi import APIRouter, Depends, Query, UploadFile from starlette.responses import Response import letta.constants as constants @@ -22,9 +21,9 @@ from letta.otel.tracing import trace_method from letta.schemas.agent import AgentState from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import DuplicateFileHandling, FileProcessingStatus -from letta.schemas.file import FileMetadata, FileMetadataBase +from letta.schemas.file import FileMetadata from letta.schemas.passage import Passage -from letta.schemas.source import BaseSource, Source, SourceCreate, SourceUpdate +from letta.schemas.source import Source, SourceCreate, SourceUpdate from letta.schemas.source_metadata import OrganizationSourcesStats from letta.schemas.user import User from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server @@ -310,7 +309,7 @@ async def upload_file_to_source( return response elif duplicate_handling == DuplicateFileHandling.REPLACE: # delete the file - deleted_file = await server.file_manager.delete_file(file_id=existing_file.id, actor=actor) + await server.file_manager.delete_file(file_id=existing_file.id, actor=actor) unique_filename = original_filename if not unique_filename: diff --git a/letta/server/rest_api/routers/v1/steps.py b/letta/server/rest_api/routers/v1/steps.py index 0b28a949..b8b238b3 100644 --- a/letta/server/rest_api/routers/v1/steps.py +++ b/letta/server/rest_api/routers/v1/steps.py @@ -7,7 +7,7 @@ from pydantic import BaseModel, Field from letta.schemas.letta_message import LettaMessageUnion from letta.schemas.message import Message from letta.schemas.provider_trace import ProviderTrace -from letta.schemas.step import Step, StepBase +from letta.schemas.step import Step from letta.schemas.step_metrics import StepMetrics from 
letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.server.server import SyncServer @@ -106,7 +106,7 @@ async def retrieve_trace_for_step( provider_trace = await server.telemetry_manager.get_provider_trace_by_step_id_async( step_id=step_id, actor=await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) ) - except: + except Exception: pass return provider_trace diff --git a/letta/server/rest_api/routers/v1/telemetry.py b/letta/server/rest_api/routers/v1/telemetry.py index e317a791..e4773e6b 100644 --- a/letta/server/rest_api/routers/v1/telemetry.py +++ b/letta/server/rest_api/routers/v1/telemetry.py @@ -27,7 +27,7 @@ async def retrieve_provider_trace( provider_trace = await server.telemetry_manager.get_provider_trace_by_step_id_async( step_id=step_id, actor=await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) ) - except: + except Exception: pass return provider_trace diff --git a/letta/server/rest_api/routers/v1/tools.py b/letta/server/rest_api/routers/v1/tools.py index ac6de569..f7ba2c81 100644 --- a/letta/server/rest_api/routers/v1/tools.py +++ b/letta/server/rest_api/routers/v1/tools.py @@ -1,20 +1,20 @@ +import asyncio import json +import traceback from collections.abc import AsyncGenerator from typing import Any, Dict, List, Literal, Optional, Union from fastapi import APIRouter, Body, Depends, HTTPException, Query, Request from httpx import ConnectError, HTTPStatusError +from mcp.shared.exceptions import McpError from pydantic import BaseModel, Field from starlette.responses import StreamingResponse -from letta.constants import DEFAULT_GENERATE_TOOL_MODEL_HANDLE, MAX_TOOL_NAME_LENGTH +from letta.constants import DEFAULT_GENERATE_TOOL_MODEL_HANDLE from letta.errors import ( LettaInvalidArgumentError, - LettaInvalidMCPSchemaError, LettaMCPConnectionError, LettaMCPTimeoutError, - LettaToolCreateError, - LettaToolNameConflictError, LLMError, ) from 
letta.functions.functions import derive_openai_json_schema @@ -23,25 +23,22 @@ from letta.functions.mcp_client.types import MCPTool, SSEServerConfig, StdioServ from letta.helpers.decorators import deprecated from letta.llm_api.llm_client import LLMClient from letta.log import get_logger -from letta.orm.errors import UniqueConstraintViolationError from letta.orm.mcp_oauth import OAuthSessionStatus from letta.prompts.gpt_system import get_system_text -from letta.schemas.enums import AgentType, MessageRole, ToolType +from letta.schemas.enums import AgentType, LLMCallType, MessageRole, ToolType from letta.schemas.letta_message import ToolReturnMessage from letta.schemas.letta_message_content import TextContent from letta.schemas.mcp import UpdateSSEMCPServer, UpdateStdioMCPServer, UpdateStreamableHTTPMCPServer from letta.schemas.message import Message from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.tool import BaseTool, Tool, ToolCreate, ToolRunFromSource, ToolSearchRequest, ToolSearchResult, ToolUpdate +from letta.schemas.tool import Tool, ToolCreate, ToolRunFromSource, ToolSearchRequest, ToolSearchResult, ToolUpdate from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server from letta.server.rest_api.streaming_response import StreamingResponseWithStatusCode from letta.server.server import SyncServer from letta.services.mcp.oauth_utils import MCPOAuthSession, drill_down_exception, oauth_stream_event from letta.services.mcp.stdio_client import AsyncStdioMCPClient from letta.services.mcp.types import OauthStreamEvent -from letta.services.summarizer.summarizer import traceback from letta.settings import tool_settings -from letta.utils import asyncio from letta.validators import ToolId router = APIRouter(prefix="/tools", tags=["tools"]) @@ -641,7 +638,9 @@ async def test_mcp_server( tools = await client.list_tools() return {"status": "success", "tools": tools} - except ConnectionError as e: + except 
(ConnectionError, LettaMCPConnectionError) as e: + if isinstance(e, LettaMCPConnectionError): + raise raise LettaMCPConnectionError(str(e), server_name=request.server_name) except MCPTimeoutError as e: raise LettaMCPTimeoutError(f"MCP server connection timed out: {str(e)}", server_name=request.server_name) @@ -703,8 +702,7 @@ async def connect_mcp_server( tools = await client.list_tools(serialize=True) yield oauth_stream_event(OauthStreamEvent.SUCCESS, tools=tools) return - except ConnectionError as e: - # Only trigger OAuth flow on explicit unauthorized failures + except (ConnectionError, LettaMCPConnectionError) as e: unauthorized = False if isinstance(e.__cause__, HTTPStatusError): unauthorized = e.__cause__.response.status_code == 401 @@ -740,6 +738,13 @@ async def connect_mcp_server( async for event in server.mcp_manager.handle_oauth_flow(request=request, actor=actor, http_request=http_request): yield event return + except ExceptionGroup as eg: + # Handle ExceptionGroup wrapping (Python 3.11+ async TaskGroup can wrap exceptions) + # Unwrap and handle the first exception in the group + exception_to_handle = eg.exceptions[0] if eg.exceptions else eg + detailed_error = drill_down_exception(exception_to_handle) + logger.error(f"Error in OAuth stream (ExceptionGroup):\n{detailed_error}") + yield oauth_stream_event(OauthStreamEvent.ERROR, message=f"Internal error: {detailed_error}") except Exception as e: detailed_error = drill_down_exception(e) logger.error(f"Error in OAuth stream:\n{detailed_error}") @@ -819,7 +824,23 @@ async def execute_mcp_tool( await client.connect_to_server() # Execute the tool - result, success = await client.execute_tool(tool_name, request.args) + try: + result, success = await client.execute_tool(tool_name, request.args) + except Exception as e: + # Handle ExceptionGroup wrapping (Python 3.11+ async TaskGroup can wrap exceptions) + exception_to_check = e + if hasattr(e, "exceptions") and e.exceptions: + if len(e.exceptions) == 1: + 
exception_to_check = e.exceptions[0] + + # Check by class name to handle both fastmcp.exceptions.ToolError and potential module variations + if exception_to_check.__class__.__name__ == "ToolError": + raise LettaInvalidArgumentError( + f"Invalid arguments for MCP tool '{tool_name}': {str(exception_to_check)}", argument_name="args" + ) + elif isinstance(exception_to_check, McpError): + raise LettaMCPConnectionError(f"MCP tool execution failed: {str(exception_to_check)}", server_name=tool_name) + raise return { "result": result, @@ -956,7 +977,7 @@ async def generate_tool_from_prompt( llm_client.set_telemetry_context( telemetry_manager=TelemetryManager(), - call_type="tool_generation", + call_type=LLMCallType.tool_generation, ) response_data = await llm_client.request_async_with_telemetry(request_data, llm_config) response = await llm_client.convert_response_to_chat_completion(response_data, input_messages, llm_config) diff --git a/letta/server/rest_api/routers/v1/zai.py b/letta/server/rest_api/routers/v1/zai.py index 9a674b8b..7ac2c46a 100644 --- a/letta/server/rest_api/routers/v1/zai.py +++ b/letta/server/rest_api/routers/v1/zai.py @@ -1,5 +1,4 @@ import asyncio -import json import httpx from fastapi import APIRouter, Depends, Request @@ -22,6 +21,8 @@ from letta.server.server import SyncServer logger = get_logger(__name__) +_background_tasks: set[asyncio.Task] = set() + router = APIRouter(prefix="/zai", tags=["zai"]) ZAI_API_BASE = "https://api.z.ai/api/anthropic" @@ -169,7 +170,7 @@ async def zai_messages_proxy( # This prevents race conditions where multiple requests persist the same message user_messages_to_persist = await check_for_duplicate_message(server, agent, actor, user_messages, PROXY_NAME) - asyncio.create_task( + task = asyncio.create_task( persist_messages_background( server=server, agent=agent, @@ -180,6 +181,8 @@ async def zai_messages_proxy( proxy_name=PROXY_NAME, ) ) + _background_tasks.add(task) + task.add_done_callback(_background_tasks.discard) 
return StreamingResponse( stream_response(), @@ -223,7 +226,7 @@ async def zai_messages_proxy( # Check for duplicate user messages before creating background task user_messages_to_persist = await check_for_duplicate_message(server, agent, actor, user_messages, PROXY_NAME) - asyncio.create_task( + task = asyncio.create_task( persist_messages_background( server=server, agent=agent, @@ -234,6 +237,8 @@ async def zai_messages_proxy( proxy_name=PROXY_NAME, ) ) + _background_tasks.add(task) + task.add_done_callback(_background_tasks.discard) except Exception as e: logger.warning(f"[{PROXY_NAME}] Failed to extract assistant response for logging: {e}") diff --git a/letta/server/rest_api/static_files.py b/letta/server/rest_api/static_files.py index 20d746c7..54a9c44f 100644 --- a/letta/server/rest_api/static_files.py +++ b/letta/server/rest_api/static_files.py @@ -1,74 +1,8 @@ -import importlib.util -import os - -from fastapi import FastAPI, HTTPException -from fastapi.responses import FileResponse -from starlette.exceptions import HTTPException as StarletteHTTPException -from starlette.staticfiles import StaticFiles - - -class SPAStaticFiles(StaticFiles): - async def get_response(self, path: str, scope): - try: - return await super().get_response(path, scope) - except (HTTPException, StarletteHTTPException) as ex: - if ex.status_code == 404: - return await super().get_response("index.html", scope) - else: - raise ex +from fastapi import FastAPI +from fastapi.responses import RedirectResponse def mount_static_files(app: FastAPI): - static_files_path = os.path.join(os.path.dirname(importlib.util.find_spec("letta").origin), "server", "static_files") - if os.path.exists(static_files_path): - app.mount("/assets", StaticFiles(directory=os.path.join(static_files_path, "assets")), name="assets") - - @app.get("/letta_logo_transparent.png", include_in_schema=False) - async def serve_spa(): - return FileResponse(os.path.join(static_files_path, "letta_logo_transparent.png")) - - 
@app.get("/", include_in_schema=False) - async def serve_spa(): - return FileResponse(os.path.join(static_files_path, "index.html")) - - @app.get("/agents", include_in_schema=False) - async def serve_spa(): - return FileResponse(os.path.join(static_files_path, "index.html")) - - @app.get("/data-sources", include_in_schema=False) - async def serve_spa(): - return FileResponse(os.path.join(static_files_path, "index.html")) - - @app.get("/tools", include_in_schema=False) - async def serve_spa(): - return FileResponse(os.path.join(static_files_path, "index.html")) - - @app.get("/agent-templates", include_in_schema=False) - async def serve_spa(): - return FileResponse(os.path.join(static_files_path, "index.html")) - - @app.get("/human-templates", include_in_schema=False) - async def serve_spa(): - return FileResponse(os.path.join(static_files_path, "index.html")) - - @app.get("/settings/profile", include_in_schema=False) - async def serve_spa(): - return FileResponse(os.path.join(static_files_path, "index.html")) - - @app.get("/agents/{agent-id}/chat", include_in_schema=False) - async def serve_spa(): - return FileResponse(os.path.join(static_files_path, "index.html")) - - -# def mount_static_files(app: FastAPI): -# static_files_path = os.path.join(os.path.dirname(importlib.util.find_spec("letta").origin), "server", "static_files") -# if os.path.exists(static_files_path): - -# @app.get("/{full_path:path}") -# async def serve_spa(full_path: str): -# if full_path.startswith("v1"): -# raise HTTPException(status_code=404, detail="Not found") -# file_path = os.path.join(static_files_path, full_path) -# if os.path.isfile(file_path): -# return FileResponse(file_path) -# return FileResponse(os.path.join(static_files_path, "index.html")) + @app.get("/", include_in_schema=False) + async def redirect_to_docs(): + return RedirectResponse(url="/docs") diff --git a/letta/server/rest_api/streaming_response.py b/letta/server/rest_api/streaming_response.py index 02d727ff..9b7e5738 
100644 --- a/letta/server/rest_api/streaming_response.py +++ b/letta/server/rest_api/streaming_response.py @@ -42,7 +42,7 @@ def get_cancellation_event_for_run(run_id: str) -> asyncio.Event: class RunCancelledException(Exception): """Exception raised when a run is explicitly cancelled (not due to client timeout)""" - def __init__(self, run_id: str, message: str = None): + def __init__(self, run_id: str, message: str | None = None): self.run_id = run_id super().__init__(message or f"Run {run_id} was explicitly cancelled") @@ -228,7 +228,7 @@ class StreamingResponseWithStatusCode(StreamingResponse): await asyncio.shield(self._protected_stream_response(send)) except asyncio.CancelledError: logger.info("Stream response was cancelled, but shielded task should continue") - except anyio.ClosedResourceError: + except (anyio.ClosedResourceError, anyio.BrokenResourceError): logger.info("Client disconnected, but shielded task should continue") self._client_connected = False except PendingApprovalError as e: @@ -272,7 +272,7 @@ class StreamingResponseWithStatusCode(StreamingResponse): "more_body": more_body, } ) - except anyio.ClosedResourceError: + except (anyio.ClosedResourceError, anyio.BrokenResourceError): logger.info("Client disconnected during initial response, continuing processing without sending more chunks") self._client_connected = False @@ -302,10 +302,9 @@ class StreamingResponseWithStatusCode(StreamingResponse): "more_body": more_body, } ) - except anyio.ClosedResourceError: + except (anyio.ClosedResourceError, anyio.BrokenResourceError): logger.info("Client disconnected, continuing processing without sending more data") self._client_connected = False - # Continue processing but don't try to send more data # Handle explicit run cancellations (should not throw error) except RunCancelledException as exc: @@ -332,7 +331,7 @@ class StreamingResponseWithStatusCode(StreamingResponse): "more_body": more_body, } ) - except anyio.ClosedResourceError: + except 
(anyio.ClosedResourceError, anyio.BrokenResourceError): self._client_connected = False return @@ -369,7 +368,7 @@ class StreamingResponseWithStatusCode(StreamingResponse): "more_body": more_body, } ) - except anyio.ClosedResourceError: + except (anyio.ClosedResourceError, anyio.BrokenResourceError): self._client_connected = False capture_sentry_exception(exc) @@ -377,5 +376,5 @@ class StreamingResponseWithStatusCode(StreamingResponse): if more_body and self._client_connected: try: await send({"type": "http.response.body", "body": b"", "more_body": False}) - except anyio.ClosedResourceError: + except (anyio.ClosedResourceError, anyio.BrokenResourceError): self._client_connected = False diff --git a/letta/server/rest_api/utils.py b/letta/server/rest_api/utils.py index 66e15572..bfbbe505 100644 --- a/letta/server/rest_api/utils.py +++ b/letta/server/rest_api/utils.py @@ -5,7 +5,7 @@ import uuid from enum import Enum from typing import Any, AsyncGenerator, Dict, Iterable, List, Optional, Union, cast -from fastapi import Header, HTTPException +from fastapi import HTTPException from openai.types.chat import ChatCompletionMessageParam from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction from openai.types.chat.completion_create_params import CompletionCreateParams @@ -308,7 +308,7 @@ def create_approval_request_message_from_llm_response( reasoning_content: Optional[List[Union[TextContent, ReasoningContent, RedactedReasoningContent, OmittedReasoningContent]]] = None, pre_computed_assistant_message_id: Optional[str] = None, step_id: str | None = None, - run_id: str = None, + run_id: str | None = None, ) -> Message: messages = [] if allowed_tool_calls: @@ -386,7 +386,7 @@ def create_letta_messages_from_llm_response( function_response: Optional[str], timezone: str, run_id: str | None = None, - step_id: str = None, + step_id: str | None = None, continue_stepping: bool = False, 
heartbeat_reason: Optional[str] = None, reasoning_content: Optional[ diff --git a/letta/server/server.py b/letta/server/server.py index 2197c38a..33c98482 100644 --- a/letta/server/server.py +++ b/letta/server/server.py @@ -2,19 +2,15 @@ import asyncio import json import os import traceback -from abc import abstractmethod from datetime import datetime from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union import httpx from anthropic import AsyncAnthropic -from fastapi import HTTPException -from fastapi.responses import StreamingResponse import letta.constants as constants import letta.server.utils as server_utils -import letta.system as system from letta.config import LettaConfig from letta.constants import LETTA_TOOL_EXECUTION_DIR from letta.data_sources.connectors import DataConnector, load_data @@ -22,17 +18,13 @@ from letta.errors import ( HandleNotFoundError, LettaInvalidArgumentError, LettaMCPConnectionError, - LettaMCPTimeoutError, ) from letta.functions.mcp_client.types import MCPServerType, MCPTool, MCPToolHealth, SSEServerConfig, StdioServerConfig from letta.functions.schema_validator import validate_complete_json_schema -from letta.groups.helpers import load_multi_agent from letta.helpers.datetime_helpers import get_utc_time -from letta.helpers.json_helpers import json_dumps, json_loads # TODO use custom interface from letta.interface import ( - AgentInterface, # abstract CLIInterface, # for printing to terminal ) from letta.log import get_logger @@ -44,17 +36,13 @@ from letta.schemas.block import Block, BlockUpdate, CreateBlock from letta.schemas.embedding_config import EmbeddingConfig # openai schemas -from letta.schemas.enums import AgentType, JobStatus, MessageStreamStatus, ProviderCategory, ProviderType, SandboxType, ToolSourceType -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate -from letta.schemas.group import GroupCreate, ManagerType, SleeptimeManager, VoiceSleeptimeManager 
+from letta.schemas.enums import AgentType, JobStatus, ProviderCategory, ProviderType, ToolSourceType +from letta.schemas.group import GroupCreate, SleeptimeManager, VoiceSleeptimeManager from letta.schemas.job import Job, JobUpdate -from letta.schemas.letta_message import LegacyLettaMessage, LettaMessage, MessageType, ToolReturnMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_response import LettaResponse -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType +from letta.schemas.letta_message import LettaMessage, ToolReturnMessage from letta.schemas.llm_config import LLMConfig -from letta.schemas.memory import ArchivalMemorySummary, Memory, RecallMemorySummary -from letta.schemas.message import Message, MessageCreate, MessageUpdate +from letta.schemas.memory import Memory +from letta.schemas.message import Message from letta.schemas.passage import Passage from letta.schemas.pip_requirement import PipRequirement from letta.schemas.providers import ( @@ -82,15 +70,12 @@ from letta.schemas.sandbox_config import LocalSandboxConfig, SandboxConfigCreate from letta.schemas.secret import Secret from letta.schemas.source import Source from letta.schemas.tool import Tool -from letta.schemas.usage import LettaUsageStatistics from letta.schemas.user import User -from letta.server.rest_api.chat_completions_interface import ChatCompletionsStreamingInterface -from letta.server.rest_api.interface import StreamingServerInterface -from letta.server.rest_api.utils import sse_async_generator from letta.services.agent_manager import AgentManager from letta.services.agent_serialization_manager import AgentSerializationManager from letta.services.archive_manager import ArchiveManager from letta.services.block_manager import BlockManager +from letta.services.block_manager_git import GIT_MEMORY_ENABLED_TAG, GitEnabledBlockManager from letta.services.file_manager import FileManager from letta.services.files_agents_manager 
import FileAgentManager from letta.services.group_manager import GroupManager @@ -104,6 +89,7 @@ from letta.services.mcp.sse_client import MCP_CONFIG_TOPLEVEL_KEY from letta.services.mcp.stdio_client import AsyncStdioMCPClient from letta.services.mcp_manager import MCPManager from letta.services.mcp_server_manager import MCPServerManager +from letta.services.memory_repo import MemfsClient from letta.services.message_manager import MessageManager from letta.services.organization_manager import OrganizationManager from letta.services.passage_manager import PassageManager @@ -118,7 +104,7 @@ from letta.services.tool_manager import ToolManager from letta.services.user_manager import UserManager from letta.settings import DatabaseChoice, model_settings, settings, tool_settings from letta.streaming_interface import AgentChunkStreamingInterface -from letta.utils import get_friendly_error_msg, get_persona_text, safe_create_task +from letta.utils import get_friendly_error_msg, get_persona_text config = LettaConfig.load() logger = get_logger(__name__) @@ -165,13 +151,19 @@ class SyncServer(object): self.tool_manager = ToolManager() self.mcp_manager = MCPManager() self.mcp_server_manager = MCPServerManager() - self.block_manager = BlockManager() + self.memory_repo_manager = self._init_memory_repo_manager() + # Use git-enabled block manager if memory repo is configured + # It falls back to standard PostgreSQL behavior when git isn't enabled for an agent + if self.memory_repo_manager: + self.block_manager = GitEnabledBlockManager(memory_repo_manager=self.memory_repo_manager) + else: + self.block_manager = BlockManager() self.source_manager = SourceManager() self.sandbox_config_manager = SandboxConfigManager() self.message_manager = MessageManager() self.job_manager = JobManager() self.run_manager = RunManager() - self.agent_manager = AgentManager() + self.agent_manager = AgentManager(block_manager=self.block_manager) self.archive_manager = ArchiveManager() self.provider_manager 
= ProviderManager() self.step_manager = StepManager() @@ -182,6 +174,11 @@ class SyncServer(object): self.file_agent_manager = FileAgentManager() self.file_manager = FileManager() + # Import and initialize the agent generate completion manager + from letta.services.agent_generate_completion_manager import AgentGenerateCompletionManager + + self.agent_generate_completion_manager = AgentGenerateCompletionManager(server=self) + self.agent_serialization_manager = AgentSerializationManager( agent_manager=self.agent_manager, tool_manager=self.tool_manager, @@ -416,6 +413,23 @@ class SyncServer(object): force_recreate=True, ) + def _init_memory_repo_manager(self) -> Optional[MemfsClient]: + """Initialize the memory repository manager if configured. + + Requires LETTA_MEMFS_SERVICE_URL to be set to the external memfs service URL. + + Returns: + MemfsClient if configured, None otherwise + """ + from letta.settings import settings + + if not settings.memfs_service_url: + logger.debug("Memory repo manager not configured (memfs_service_url not set)") + return None + + logger.info("Memory repo manager using memfs service: %s", settings.memfs_service_url) + return MemfsClient(base_url=settings.memfs_service_url) + def _get_enabled_provider(self, provider_name: str) -> Optional[Provider]: """Find and return an enabled provider by name. @@ -571,13 +585,46 @@ class SyncServer(object): request.embedding_config = await self.get_embedding_config_from_handle_async(actor=actor, **embedding_config_params) log_event(name="end get_embedding_config_from_handle", attributes=embedding_config_params) + # If git-backed memory is requested on create, we enable it *after* agent creation. + # We strip the tag during creation so `enable_git_memory_for_agent` can be the + # single place that both creates the repo and writes the tag. 
+ wants_git_memory = bool(request.tags and GIT_MEMORY_ENABLED_TAG in request.tags) + create_request = request + if wants_git_memory: + filtered_tags = [t for t in (request.tags or []) if t != GIT_MEMORY_ENABLED_TAG] + updates: dict = {"tags": filtered_tags} + + # Transform block labels to path-based for git-memory agents. + # Blocks without a "/" prefix go under system/ (rendered in system prompt). + # e.g. "human" -> "system/human", "persona" -> "system/persona" + # Blocks with an explicit path (e.g. "notes/project") keep their label. + if request.memory_blocks: + transformed_blocks = [] + for block in request.memory_blocks: + if not block.label.startswith("system/"): + block = block.model_copy(update={"label": f"system/{block.label}"}) + transformed_blocks.append(block) + updates["memory_blocks"] = transformed_blocks + + create_request = request.model_copy(update=updates) + log_event(name="start create_agent db") main_agent = await self.agent_manager.create_agent_async( - agent_create=request, + agent_create=create_request, actor=actor, ) log_event(name="end create_agent db") + # Enable git-backed memory (creates repo + commits initial blocks + adds tag) + if wants_git_memory and isinstance(self.block_manager, GitEnabledBlockManager): + await self.block_manager.enable_git_memory_for_agent(agent_id=main_agent.id, actor=actor) + # Preserve the user's requested tags and git_enabled flag in the response model. 
+ try: + main_agent.tags = list(request.tags or []) + main_agent.memory.git_enabled = True + except Exception: + pass + log_event(name="start insert_files_into_context_window db") # Use folder_ids if provided, otherwise fall back to deprecated source_ids for backwards compatibility folder_ids_to_attach = request.folder_ids if request.folder_ids else request.source_ids @@ -629,6 +676,10 @@ class SyncServer(object): agent = await self.agent_manager.get_agent_by_id_async(agent_id=agent_id, actor=actor) request.llm_config = agent.llm_config.model_copy() update_llm_config_params = request.model_settings._to_legacy_config_params() + # Don't clobber max_tokens with the Pydantic default when the caller + # didn't explicitly provide max_output_tokens in the request. + if "max_output_tokens" not in request.model_settings.model_fields_set: + update_llm_config_params.pop("max_tokens", None) request.llm_config = request.llm_config.model_copy(update=update_llm_config_params) # Copy parallel_tool_calls from request to llm_config if provided @@ -650,12 +701,26 @@ class SyncServer(object): else: await self.create_sleeptime_agent_async(main_agent=agent, actor=actor) - return await self.agent_manager.update_agent_async( + # If git-backed memory is requested via tag update, initialize/backfill the repo. + wants_git_memory = bool(request.tags and GIT_MEMORY_ENABLED_TAG in request.tags) + + updated_agent = await self.agent_manager.update_agent_async( agent_id=agent_id, agent_update=request, actor=actor, ) + # Ensure repo exists and initial blocks are committed when the tag is present. + if wants_git_memory and isinstance(self.block_manager, GitEnabledBlockManager): + await self.block_manager.enable_git_memory_for_agent(agent_id=agent_id, actor=actor) + # Preserve the user's requested tags in the response model. 
+ try: + updated_agent.tags = list(request.tags or []) + except Exception: + pass + + return updated_agent + async def create_sleeptime_agent_async(self, main_agent: AgentState, actor: User) -> Optional[AgentState]: if main_agent.embedding_config is None: logger.warning(f"Skipping sleeptime agent creation for agent {main_agent.id}: no embedding config provided") @@ -789,7 +854,7 @@ class SyncServer(object): async def delete_archival_memory_async(self, memory_id: str, actor: User): # TODO check if it exists first, and throw error if not # TODO: need to also rebuild the prompt here - passage = await self.passage_manager.get_passage_by_id_async(passage_id=memory_id, actor=actor) + await self.passage_manager.get_passage_by_id_async(passage_id=memory_id, actor=actor) # delete the passage await self.passage_manager.delete_passage_by_id_async(passage_id=memory_id, actor=actor) @@ -1077,7 +1142,7 @@ class SyncServer(object): return None try: block = await self.agent_manager.get_block_with_label_async(agent_id=main_agent.id, block_label=source.name, actor=actor) - except: + except Exception: block = await self.block_manager.create_or_update_block_async(Block(label=source.name, value=""), actor=actor) await self.agent_manager.attach_block_async(agent_id=main_agent.id, block_id=block.id, actor=actor) @@ -1166,11 +1231,14 @@ class SyncServer(object): # Build LLMConfig objects from database provider_cache: Dict[str, Provider] = {} + typed_provider_cache: Dict[str, Any] = {} for model in provider_models: # Get provider details (with caching to avoid N+1 queries) if model.provider_id not in provider_cache: provider_cache[model.provider_id] = await self.provider_manager.get_provider_async(model.provider_id, actor) + typed_provider_cache[model.provider_id] = provider_cache[model.provider_id].cast_to_subtype() provider = provider_cache[model.provider_id] + typed_provider = typed_provider_cache[model.provider_id] # Skip non-base providers (they're handled separately) if 
provider.provider_category != ProviderCategory.base: @@ -1185,11 +1253,13 @@ class SyncServer(object): # For bedrock, use schema default for base_url since DB may have NULL # TODO: can maybe do this for all models but want to isolate change so we don't break any other providers if provider.provider_type == ProviderType.bedrock: - typed_provider = provider.cast_to_subtype() model_endpoint = typed_provider.base_url else: model_endpoint = provider.base_url + # Get provider-specific default max_tokens + max_tokens = typed_provider.get_default_max_output_tokens(model.name) + llm_config = LLMConfig( model=model.name, model_endpoint_type=model.model_endpoint_type, @@ -1198,6 +1268,7 @@ class SyncServer(object): handle=model.handle, provider_name=provider.name, provider_category=provider.provider_category, + max_tokens=max_tokens, ) llm_models.append(llm_config) @@ -1215,8 +1286,24 @@ class SyncServer(object): # Get typed provider to access schema defaults (e.g., base_url) typed_provider = provider.cast_to_subtype() - # Sync models if not synced yet - if provider.last_synced is None: + provider_llm_models = None + should_sync_models = provider.last_synced is None + + # ChatGPT OAuth uses a hardcoded model list. If that list changes, + # backfill already-synced providers that are missing new handles. 
+ if provider.provider_type == ProviderType.chatgpt_oauth and not should_sync_models: + expected_models = await typed_provider.list_llm_models_async() + expected_handles = {model.handle for model in expected_models} + provider_llm_models = await self.provider_manager.list_models_async( + actor=actor, + model_type="llm", + provider_id=provider.id, + enabled=True, + ) + existing_handles = {model.handle for model in provider_llm_models} + should_sync_models = not expected_handles.issubset(existing_handles) + + if should_sync_models: models = await typed_provider.list_llm_models_async() embedding_models = await typed_provider.list_embedding_models_async() await self.provider_manager.sync_provider_models_async( @@ -1228,13 +1315,15 @@ class SyncServer(object): await self.provider_manager.update_provider_last_synced_async(provider.id, actor=actor) # Read from database - provider_llm_models = await self.provider_manager.list_models_async( - actor=actor, - model_type="llm", - provider_id=provider.id, - enabled=True, - ) + if provider_llm_models is None: + provider_llm_models = await self.provider_manager.list_models_async( + actor=actor, + model_type="llm", + provider_id=provider.id, + enabled=True, + ) for model in provider_llm_models: + max_tokens = typed_provider.get_default_max_output_tokens(model.name) llm_config = LLMConfig( model=model.name, model_endpoint_type=model.model_endpoint_type, @@ -1243,6 +1332,7 @@ class SyncServer(object): handle=model.handle, provider_name=provider.name, provider_category=ProviderCategory.byok, + max_tokens=max_tokens, ) llm_models.append(llm_config) except Exception as e: @@ -1464,7 +1554,7 @@ class SyncServer(object): ) -> ToolReturnMessage: """Run a tool from source code""" - from letta.services.tool_schema_generator import generate_schema_for_tool_creation, generate_schema_for_tool_update + from letta.services.tool_schema_generator import generate_schema_for_tool_creation if tool_source_type not in (None, ToolSourceType.python, 
ToolSourceType.typescript): raise LettaInvalidArgumentError( @@ -1681,9 +1771,14 @@ class SyncServer(object): raise LettaInvalidArgumentError(f"Invalid MCP server config: {server_config}", argument_name="server_config") try: await new_mcp_client.connect_to_server() - except: + except LettaMCPConnectionError: + raise + except Exception: logger.exception(f"Failed to connect to MCP server: {server_config.server_name}") - raise RuntimeError(f"Failed to connect to MCP server: {server_config.server_name}") + raise LettaMCPConnectionError( + message=f"Failed to connect to MCP server: {server_config.server_name}", + server_name=server_config.server_name, + ) # Print out the tools that are connected logger.info(f"Attempting to fetch tools from MCP server: {server_config.server_name}") new_mcp_tools = await new_mcp_client.list_tools() @@ -1754,244 +1849,3 @@ class SyncServer(object): raise LettaInvalidArgumentError(f"Failed to write MCP config file {mcp_config_path}") return list(current_mcp_servers.values()) - - @trace_method - async def send_message_to_agent( - self, - agent_id: str, - actor: User, - # role: MessageRole, - input_messages: List[MessageCreate], - stream_steps: bool, - stream_tokens: bool, - # related to whether or not we return `LettaMessage`s or `Message`s - chat_completion_mode: bool = False, - # Support for AssistantMessage - use_assistant_message: bool = True, - assistant_message_tool_name: str = constants.DEFAULT_MESSAGE_TOOL, - assistant_message_tool_kwarg: str = constants.DEFAULT_MESSAGE_TOOL_KWARG, - metadata: Optional[dict] = None, - request_start_timestamp_ns: Optional[int] = None, - include_return_message_types: Optional[List[MessageType]] = None, - ) -> Union[StreamingResponse, LettaResponse]: - """Split off into a separate function so that it can be imported in the /chat/completion proxy.""" - # TODO: @charles is this the correct way to handle? 
- include_final_message = True - - if not stream_steps and stream_tokens: - raise HTTPException(status_code=400, detail="stream_steps must be 'true' if stream_tokens is 'true'") - - # For streaming response - try: - # TODO: move this logic into server.py - - # Get the generator object off of the agent's streaming interface - # This will be attached to the POST SSE request used under-the-hood - letta_agent = self.load_agent(agent_id=agent_id, actor=actor) - - # Disable token streaming if not OpenAI or Anthropic - # TODO: cleanup this logic - llm_config = letta_agent.agent_state.llm_config - # supports_token_streaming = ["openai", "anthropic", "xai", "deepseek"] - supports_token_streaming = ["openai", "anthropic", "deepseek", "chatgpt_oauth"] # TODO re-enable xAI once streaming is patched - if stream_tokens and (llm_config.model_endpoint_type not in supports_token_streaming): - logger.warning( - f"Token streaming is only supported for models with type {' or '.join(supports_token_streaming)} in the model_endpoint: agent has endpoint type {llm_config.model_endpoint_type} and {llm_config.model_endpoint}. Setting stream_tokens to False." - ) - stream_tokens = False - - # Create a new interface per request - letta_agent.interface = StreamingServerInterface( - # multi_step=True, # would we ever want to disable this? 
- use_assistant_message=use_assistant_message, - assistant_message_tool_name=assistant_message_tool_name, - assistant_message_tool_kwarg=assistant_message_tool_kwarg, - inner_thoughts_in_kwargs=( - llm_config.put_inner_thoughts_in_kwargs if llm_config.put_inner_thoughts_in_kwargs is not None else False - ), - # inner_thoughts_kwarg=INNER_THOUGHTS_KWARG, - ) - streaming_interface = letta_agent.interface - if not isinstance(streaming_interface, StreamingServerInterface): - raise LettaInvalidArgumentError( - f"Agent has wrong type of interface: {type(streaming_interface)}", argument_name="interface" - ) - - # Enable token-streaming within the request if desired - streaming_interface.streaming_mode = stream_tokens - # "chatcompletion mode" does some remapping and ignores inner thoughts - streaming_interface.streaming_chat_completion_mode = chat_completion_mode - - # streaming_interface.allow_assistant_message = stream - # streaming_interface.function_call_legacy_mode = stream - - # Allow AssistantMessage is desired by client - # streaming_interface.use_assistant_message = use_assistant_message - # streaming_interface.assistant_message_tool_name = assistant_message_tool_name - # streaming_interface.assistant_message_tool_kwarg = assistant_message_tool_kwarg - - # Related to JSON buffer reader - # streaming_interface.inner_thoughts_in_kwargs = ( - # llm_config.put_inner_thoughts_in_kwargs if llm_config.put_inner_thoughts_in_kwargs is not None else False - # ) - - # Offload the synchronous message_func to a separate thread - streaming_interface.stream_start() - task = safe_create_task( - asyncio.to_thread( - self.send_messages, - actor=actor, - agent_id=agent_id, - input_messages=input_messages, - interface=streaming_interface, - metadata=metadata, - ), - label="send_messages_thread", - ) - - if stream_steps: - # return a stream - return StreamingResponse( - sse_async_generator( - streaming_interface.get_generator(), - usage_task=task, - 
finish_message=include_final_message, - request_start_timestamp_ns=request_start_timestamp_ns, - llm_config=llm_config, - ), - media_type="text/event-stream", - ) - - else: - # buffer the stream, then return the list - generated_stream = [] - async for message in streaming_interface.get_generator(): - assert ( - isinstance(message, LettaMessage) - or isinstance(message, LegacyLettaMessage) - or isinstance(message, MessageStreamStatus) - ), type(message) - generated_stream.append(message) - if message == MessageStreamStatus.done: - break - - # Get rid of the stream status messages - filtered_stream = [d for d in generated_stream if not isinstance(d, MessageStreamStatus)] - - # Apply message type filtering if specified - if include_return_message_types is not None: - filtered_stream = [msg for msg in filtered_stream if msg.message_type in include_return_message_types] - - usage = await task - - # By default the stream will be messages of type LettaMessage or LettaLegacyMessage - # If we want to convert these to Message, we can use the attached IDs - # NOTE: we will need to de-duplicate the Messsage IDs though (since Assistant->Inner+Func_Call) - # TODO: eventually update the interface to use `Message` and `MessageChunk` (new) inside the deque instead - return LettaResponse( - messages=filtered_stream, - stop_reason=LettaStopReason(stop_reason=StopReasonType.end_turn.value), - usage=usage, - ) - - except HTTPException: - raise - except Exception as e: - logger.exception(f"Error sending message to agent: {e}") - raise HTTPException(status_code=500, detail=f"{e}") - - @trace_method - async def send_group_message_to_agent( - self, - group_id: str, - actor: User, - input_messages: Union[List[Message], List[MessageCreate]], - stream_steps: bool, - stream_tokens: bool, - chat_completion_mode: bool = False, - # Support for AssistantMessage - use_assistant_message: bool = True, - assistant_message_tool_name: str = constants.DEFAULT_MESSAGE_TOOL, - 
assistant_message_tool_kwarg: str = constants.DEFAULT_MESSAGE_TOOL_KWARG, - metadata: Optional[dict] = None, - ) -> Union[StreamingResponse, LettaResponse]: - include_final_message = True - if not stream_steps and stream_tokens: - raise LettaInvalidArgumentError("stream_steps must be 'true' if stream_tokens is 'true'", argument_name="stream_steps") - - group = await self.group_manager.retrieve_group_async(group_id=group_id, actor=actor) - agent_state_id = group.manager_agent_id or (group.agent_ids[0] if len(group.agent_ids) > 0 else None) - agent_state = await self.agent_manager.get_agent_by_id_async(agent_id=agent_state_id, actor=actor) if agent_state_id else None - letta_multi_agent = load_multi_agent(group=group, agent_state=agent_state, actor=actor) - - llm_config = letta_multi_agent.agent_state.llm_config - supports_token_streaming = ["openai", "anthropic", "deepseek", "chatgpt_oauth"] - if stream_tokens and (llm_config.model_endpoint_type not in supports_token_streaming): - logger.warning( - f"Token streaming is only supported for models with type {' or '.join(supports_token_streaming)} in the model_endpoint: agent has endpoint type {llm_config.model_endpoint_type} and {llm_config.model_endpoint}. Setting stream_tokens to False." 
- ) - stream_tokens = False - - # Create a new interface per request - letta_multi_agent.interface = StreamingServerInterface( - use_assistant_message=use_assistant_message, - assistant_message_tool_name=assistant_message_tool_name, - assistant_message_tool_kwarg=assistant_message_tool_kwarg, - inner_thoughts_in_kwargs=( - llm_config.put_inner_thoughts_in_kwargs if llm_config.put_inner_thoughts_in_kwargs is not None else False - ), - ) - streaming_interface = letta_multi_agent.interface - if not isinstance(streaming_interface, StreamingServerInterface): - raise LettaInvalidArgumentError(f"Agent has wrong type of interface: {type(streaming_interface)}", argument_name="interface") - streaming_interface.streaming_mode = stream_tokens - streaming_interface.streaming_chat_completion_mode = chat_completion_mode - if metadata and hasattr(streaming_interface, "metadata"): - streaming_interface.metadata = metadata - - streaming_interface.stream_start() - task = safe_create_task( - asyncio.to_thread( - letta_multi_agent.step, - input_messages=input_messages, - chaining=self.chaining, - max_chaining_steps=self.max_chaining_steps, - ), - label="multi_agent_step_thread", - ) - - if stream_steps: - # return a stream - return StreamingResponse( - sse_async_generator( - streaming_interface.get_generator(), - usage_task=task, - finish_message=include_final_message, - ), - media_type="text/event-stream", - ) - - else: - # buffer the stream, then return the list - generated_stream = [] - async for message in streaming_interface.get_generator(): - assert ( - isinstance(message, LettaMessage) or isinstance(message, LegacyLettaMessage) or isinstance(message, MessageStreamStatus) - ), type(message) - generated_stream.append(message) - if message == MessageStreamStatus.done: - break - - # Get rid of the stream status messages - filtered_stream = [d for d in generated_stream if not isinstance(d, MessageStreamStatus)] - usage = await task - - # By default the stream will be messages of type 
LettaMessage or LettaLegacyMessage - # If we want to convert these to Message, we can use the attached IDs - # NOTE: we will need to de-duplicate the Messsage IDs though (since Assistant->Inner+Func_Call) - # TODO: eventually update the interface to use `Message` and `MessageChunk` (new) inside the deque instead - return LettaResponse( - messages=filtered_stream, - stop_reason=LettaStopReason(stop_reason=StopReasonType.end_turn.value), - usage=usage, - ) diff --git a/letta/server/static_files/assets/index-048c9598.js b/letta/server/static_files/assets/index-048c9598.js deleted file mode 100644 index 7b63c8d1..00000000 --- a/letta/server/static_files/assets/index-048c9598.js +++ /dev/null @@ -1,40 +0,0 @@ -(function(){const n=document.createElement("link").relList;if(n&&n.supports&&n.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))r(l);new MutationObserver(l=>{for(const o of l)if(o.type==="childList")for(const u of o.addedNodes)u.tagName==="LINK"&&u.rel==="modulepreload"&&r(u)}).observe(document,{childList:!0,subtree:!0});function t(l){const o={};return l.integrity&&(o.integrity=l.integrity),l.referrerPolicy&&(o.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?o.credentials="include":l.crossOrigin==="anonymous"?o.credentials="omit":o.credentials="same-origin",o}function r(l){if(l.ep)return;l.ep=!0;const o=t(l);fetch(l.href,o)}})();var Ai={exports:{}},br={},Bi={exports:{}},L={};/** - * @license React - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var Yt=Symbol.for("react.element"),rc=Symbol.for("react.portal"),lc=Symbol.for("react.fragment"),oc=Symbol.for("react.strict_mode"),uc=Symbol.for("react.profiler"),ic=Symbol.for("react.provider"),sc=Symbol.for("react.context"),ac=Symbol.for("react.forward_ref"),cc=Symbol.for("react.suspense"),fc=Symbol.for("react.memo"),dc=Symbol.for("react.lazy"),Ou=Symbol.iterator;function pc(e){return e===null||typeof e!="object"?null:(e=Ou&&e[Ou]||e["@@iterator"],typeof e=="function"?e:null)}var Wi={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},Qi=Object.assign,Ki={};function lt(e,n,t){this.props=e,this.context=n,this.refs=Ki,this.updater=t||Wi}lt.prototype.isReactComponent={};lt.prototype.setState=function(e,n){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,n,"setState")};lt.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function Yi(){}Yi.prototype=lt.prototype;function Vo(e,n,t){this.props=e,this.context=n,this.refs=Ki,this.updater=t||Wi}var Fo=Vo.prototype=new Yi;Fo.constructor=Vo;Qi(Fo,lt.prototype);Fo.isPureReactComponent=!0;var Du=Array.isArray,Zi=Object.prototype.hasOwnProperty,Ho={current:null},Xi={key:!0,ref:!0,__self:!0,__source:!0};function Gi(e,n,t){var r,l={},o=null,u=null;if(n!=null)for(r in n.ref!==void 0&&(u=n.ref),n.key!==void 0&&(o=""+n.key),n)Zi.call(n,r)&&!Xi.hasOwnProperty(r)&&(l[r]=n[r]);var i=arguments.length-2;if(i===1)l.children=t;else if(1>>1,X=C[W];if(0>>1;Wl(yl,z))ynl(bt,yl)?(C[W]=bt,C[yn]=z,W=yn):(C[W]=yl,C[vn]=z,W=vn);else if(ynl(bt,z))C[W]=bt,C[yn]=z,W=yn;else break e}}return P}function l(C,P){var z=C.sortIndex-P.sortIndex;return z!==0?z:C.id-P.id}if(typeof performance=="object"&&typeof performance.now=="function"){var 
o=performance;e.unstable_now=function(){return o.now()}}else{var u=Date,i=u.now();e.unstable_now=function(){return u.now()-i}}var s=[],c=[],h=1,m=null,p=3,g=!1,w=!1,S=!1,I=typeof setTimeout=="function"?setTimeout:null,f=typeof clearTimeout=="function"?clearTimeout:null,a=typeof setImmediate<"u"?setImmediate:null;typeof navigator<"u"&&navigator.scheduling!==void 0&&navigator.scheduling.isInputPending!==void 0&&navigator.scheduling.isInputPending.bind(navigator.scheduling);function d(C){for(var P=t(c);P!==null;){if(P.callback===null)r(c);else if(P.startTime<=C)r(c),P.sortIndex=P.expirationTime,n(s,P);else break;P=t(c)}}function v(C){if(S=!1,d(C),!w)if(t(s)!==null)w=!0,hl(E);else{var P=t(c);P!==null&&vl(v,P.startTime-C)}}function E(C,P){w=!1,S&&(S=!1,f(N),N=-1),g=!0;var z=p;try{for(d(P),m=t(s);m!==null&&(!(m.expirationTime>P)||C&&!Ne());){var W=m.callback;if(typeof W=="function"){m.callback=null,p=m.priorityLevel;var X=W(m.expirationTime<=P);P=e.unstable_now(),typeof X=="function"?m.callback=X:m===t(s)&&r(s),d(P)}else r(s);m=t(s)}if(m!==null)var qt=!0;else{var vn=t(c);vn!==null&&vl(v,vn.startTime-P),qt=!1}return qt}finally{m=null,p=z,g=!1}}var x=!1,_=null,N=-1,B=5,T=-1;function Ne(){return!(e.unstable_now()-TC||125W?(C.sortIndex=z,n(c,C),t(s)===null&&C===t(c)&&(S?(f(N),N=-1):S=!0,vl(v,z-W))):(C.sortIndex=X,n(s,C),w||g||(w=!0,hl(E))),C},e.unstable_shouldYield=Ne,e.unstable_wrapCallback=function(C){var P=p;return function(){var z=p;p=P;try{return C.apply(this,arguments)}finally{p=z}}}})(es);bi.exports=es;var xc=bi.exports;/** - * @license React - * react-dom.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var ns=$o,ye=xc;function y(e){for(var n="https://reactjs.org/docs/error-decoder.html?invariant="+e,t=1;t"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),Wl=Object.prototype.hasOwnProperty,_c=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,Iu={},Vu={};function Nc(e){return Wl.call(Vu,e)?!0:Wl.call(Iu,e)?!1:_c.test(e)?Vu[e]=!0:(Iu[e]=!0,!1)}function Pc(e,n,t,r){if(t!==null&&t.type===0)return!1;switch(typeof n){case"function":case"symbol":return!0;case"boolean":return r?!1:t!==null?!t.acceptsBooleans:(e=e.toLowerCase().slice(0,5),e!=="data-"&&e!=="aria-");default:return!1}}function zc(e,n,t,r){if(n===null||typeof n>"u"||Pc(e,n,t,r))return!0;if(r)return!1;if(t!==null)switch(t.type){case 3:return!n;case 4:return n===!1;case 5:return isNaN(n);case 6:return isNaN(n)||1>n}return!1}function se(e,n,t,r,l,o,u){this.acceptsBooleans=n===2||n===3||n===4,this.attributeName=r,this.attributeNamespace=l,this.mustUseProperty=t,this.propertyName=e,this.type=n,this.sanitizeURL=o,this.removeEmptyString=u}var ee={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){ee[e]=new se(e,0,!1,e,null,!1,!1)});[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var n=e[0];ee[n]=new se(n,1,!1,e[1],null,!1,!1)});["contentEditable","draggable","spellCheck","value"].forEach(function(e){ee[e]=new se(e,2,!1,e.toLowerCase(),null,!1,!1)});["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){ee[e]=new se(e,2,!1,e,null,!1,!1)});"allowFullScreen async autoFocus 
autoPlay controls default defer disabled disablePictureInPicture disableRemotePlayback formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){ee[e]=new se(e,3,!1,e.toLowerCase(),null,!1,!1)});["checked","multiple","muted","selected"].forEach(function(e){ee[e]=new se(e,3,!0,e,null,!1,!1)});["capture","download"].forEach(function(e){ee[e]=new se(e,4,!1,e,null,!1,!1)});["cols","rows","size","span"].forEach(function(e){ee[e]=new se(e,6,!1,e,null,!1,!1)});["rowSpan","start"].forEach(function(e){ee[e]=new se(e,5,!1,e.toLowerCase(),null,!1,!1)});var Ao=/[\-:]([a-z])/g;function Bo(e){return e[1].toUpperCase()}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var n=e.replace(Ao,Bo);ee[n]=new se(n,1,!1,e,null,!1,!1)});"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var 
n=e.replace(Ao,Bo);ee[n]=new se(n,1,!1,e,"http://www.w3.org/1999/xlink",!1,!1)});["xml:base","xml:lang","xml:space"].forEach(function(e){var n=e.replace(Ao,Bo);ee[n]=new se(n,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)});["tabIndex","crossOrigin"].forEach(function(e){ee[e]=new se(e,1,!1,e.toLowerCase(),null,!1,!1)});ee.xlinkHref=new se("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1);["src","href","action","formAction"].forEach(function(e){ee[e]=new se(e,1,!1,e.toLowerCase(),null,!0,!0)});function Wo(e,n,t,r){var l=ee.hasOwnProperty(n)?ee[n]:null;(l!==null?l.type!==0:r||!(2i||l[u]!==o[i]){var s=` -`+l[u].replace(" at new "," at ");return e.displayName&&s.includes("")&&(s=s.replace("",e.displayName)),s}while(1<=u&&0<=i);break}}}finally{Sl=!1,Error.prepareStackTrace=t}return(e=e?e.displayName||e.name:"")?yt(e):""}function Lc(e){switch(e.tag){case 5:return yt(e.type);case 16:return yt("Lazy");case 13:return yt("Suspense");case 19:return yt("SuspenseList");case 0:case 2:case 15:return e=kl(e.type,!1),e;case 11:return e=kl(e.type.render,!1),e;case 1:return e=kl(e.type,!0),e;default:return""}}function Zl(e){if(e==null)return null;if(typeof e=="function")return e.displayName||e.name||null;if(typeof e=="string")return e;switch(e){case Dn:return"Fragment";case On:return"Portal";case Ql:return"Profiler";case Qo:return"StrictMode";case Kl:return"Suspense";case Yl:return"SuspenseList"}if(typeof e=="object")switch(e.$$typeof){case ls:return(e.displayName||"Context")+".Consumer";case rs:return(e._context.displayName||"Context")+".Provider";case Ko:var n=e.render;return e=e.displayName,e||(e=n.displayName||n.name||"",e=e!==""?"ForwardRef("+e+")":"ForwardRef"),e;case Yo:return n=e.displayName||null,n!==null?n:Zl(e.type)||"Memo";case Ge:n=e._payload,e=e._init;try{return Zl(e(n))}catch{}}return null}function Tc(e){var n=e.type;switch(e.tag){case 24:return"Cache";case 9:return(n.displayName||"Context")+".Consumer";case 
10:return(n._context.displayName||"Context")+".Provider";case 18:return"DehydratedFragment";case 11:return e=n.render,e=e.displayName||e.name||"",n.displayName||(e!==""?"ForwardRef("+e+")":"ForwardRef");case 7:return"Fragment";case 5:return n;case 4:return"Portal";case 3:return"Root";case 6:return"Text";case 16:return Zl(n);case 8:return n===Qo?"StrictMode":"Mode";case 22:return"Offscreen";case 12:return"Profiler";case 21:return"Scope";case 13:return"Suspense";case 19:return"SuspenseList";case 25:return"TracingMarker";case 1:case 0:case 17:case 2:case 14:case 15:if(typeof n=="function")return n.displayName||n.name||null;if(typeof n=="string")return n}return null}function fn(e){switch(typeof e){case"boolean":case"number":case"string":case"undefined":return e;case"object":return e;default:return""}}function us(e){var n=e.type;return(e=e.nodeName)&&e.toLowerCase()==="input"&&(n==="checkbox"||n==="radio")}function Rc(e){var n=us(e)?"checked":"value",t=Object.getOwnPropertyDescriptor(e.constructor.prototype,n),r=""+e[n];if(!e.hasOwnProperty(n)&&typeof t<"u"&&typeof t.get=="function"&&typeof t.set=="function"){var l=t.get,o=t.set;return Object.defineProperty(e,n,{configurable:!0,get:function(){return l.call(this)},set:function(u){r=""+u,o.call(this,u)}}),Object.defineProperty(e,n,{enumerable:t.enumerable}),{getValue:function(){return r},setValue:function(u){r=""+u},stopTracking:function(){e._valueTracker=null,delete e[n]}}}}function tr(e){e._valueTracker||(e._valueTracker=Rc(e))}function is(e){if(!e)return!1;var n=e._valueTracker;if(!n)return!0;var t=n.getValue(),r="";return e&&(r=us(e)?e.checked?"true":"false":e.value),e=r,e!==t?(n.setValue(e),!0):!1}function Lr(e){if(e=e||(typeof document<"u"?document:void 0),typeof e>"u")return null;try{return e.activeElement||e.body}catch{return e.body}}function Xl(e,n){var t=n.checked;return U({},n,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:t??e._wrapperState.initialChecked})}function Hu(e,n){var 
t=n.defaultValue==null?"":n.defaultValue,r=n.checked!=null?n.checked:n.defaultChecked;t=fn(n.value!=null?n.value:t),e._wrapperState={initialChecked:r,initialValue:t,controlled:n.type==="checkbox"||n.type==="radio"?n.checked!=null:n.value!=null}}function ss(e,n){n=n.checked,n!=null&&Wo(e,"checked",n,!1)}function Gl(e,n){ss(e,n);var t=fn(n.value),r=n.type;if(t!=null)r==="number"?(t===0&&e.value===""||e.value!=t)&&(e.value=""+t):e.value!==""+t&&(e.value=""+t);else if(r==="submit"||r==="reset"){e.removeAttribute("value");return}n.hasOwnProperty("value")?Jl(e,n.type,t):n.hasOwnProperty("defaultValue")&&Jl(e,n.type,fn(n.defaultValue)),n.checked==null&&n.defaultChecked!=null&&(e.defaultChecked=!!n.defaultChecked)}function Uu(e,n,t){if(n.hasOwnProperty("value")||n.hasOwnProperty("defaultValue")){var r=n.type;if(!(r!=="submit"&&r!=="reset"||n.value!==void 0&&n.value!==null))return;n=""+e._wrapperState.initialValue,t||n===e.value||(e.value=n),e.defaultValue=n}t=e.name,t!==""&&(e.name=""),e.defaultChecked=!!e._wrapperState.initialChecked,t!==""&&(e.name=t)}function Jl(e,n,t){(n!=="number"||Lr(e.ownerDocument)!==e)&&(t==null?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+t&&(e.defaultValue=""+t))}var gt=Array.isArray;function Qn(e,n,t,r){if(e=e.options,n){n={};for(var l=0;l"+n.valueOf().toString()+"",n=rr.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;n.firstChild;)e.appendChild(n.firstChild)}});function Rt(e,n){if(n){var t=e.firstChild;if(t&&t===e.lastChild&&t.nodeType===3){t.nodeValue=n;return}}e.textContent=n}var 
kt={animationIterationCount:!0,aspectRatio:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},Mc=["Webkit","ms","Moz","O"];Object.keys(kt).forEach(function(e){Mc.forEach(function(n){n=n+e.charAt(0).toUpperCase()+e.substring(1),kt[n]=kt[e]})});function ds(e,n,t){return n==null||typeof n=="boolean"||n===""?"":t||typeof n!="number"||n===0||kt.hasOwnProperty(e)&&kt[e]?(""+n).trim():n+"px"}function ps(e,n){e=e.style;for(var t in n)if(n.hasOwnProperty(t)){var r=t.indexOf("--")===0,l=ds(t,n[t],r);t==="float"&&(t="cssFloat"),r?e.setProperty(t,l):e[t]=l}}var Oc=U({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function eo(e,n){if(n){if(Oc[e]&&(n.children!=null||n.dangerouslySetInnerHTML!=null))throw Error(y(137,e));if(n.dangerouslySetInnerHTML!=null){if(n.children!=null)throw Error(y(60));if(typeof n.dangerouslySetInnerHTML!="object"||!("__html"in n.dangerouslySetInnerHTML))throw Error(y(61))}if(n.style!=null&&typeof n.style!="object")throw Error(y(62))}}function no(e,n){if(e.indexOf("-")===-1)return typeof n.is=="string";switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}var to=null;function Zo(e){return 
e=e.target||e.srcElement||window,e.correspondingUseElement&&(e=e.correspondingUseElement),e.nodeType===3?e.parentNode:e}var ro=null,Kn=null,Yn=null;function Bu(e){if(e=Gt(e)){if(typeof ro!="function")throw Error(y(280));var n=e.stateNode;n&&(n=ll(n),ro(e.stateNode,e.type,n))}}function ms(e){Kn?Yn?Yn.push(e):Yn=[e]:Kn=e}function hs(){if(Kn){var e=Kn,n=Yn;if(Yn=Kn=null,Bu(e),n)for(e=0;e>>=0,e===0?32:31-(Wc(e)/Qc|0)|0}var lr=64,or=4194304;function wt(e){switch(e&-e){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return e&4194240;case 4194304:case 8388608:case 16777216:case 33554432:case 67108864:return e&130023424;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 1073741824;default:return e}}function Or(e,n){var t=e.pendingLanes;if(t===0)return 0;var r=0,l=e.suspendedLanes,o=e.pingedLanes,u=t&268435455;if(u!==0){var i=u&~l;i!==0?r=wt(i):(o&=u,o!==0&&(r=wt(o)))}else u=t&~l,u!==0?r=wt(u):o!==0&&(r=wt(o));if(r===0)return 0;if(n!==0&&n!==r&&!(n&l)&&(l=r&-r,o=n&-n,l>=o||l===16&&(o&4194240)!==0))return n;if(r&4&&(r|=t&16),n=e.entangledLanes,n!==0)for(e=e.entanglements,n&=r;0t;t++)n.push(e);return n}function Zt(e,n,t){e.pendingLanes|=n,n!==536870912&&(e.suspendedLanes=0,e.pingedLanes=0),e=e.eventTimes,n=31-Re(n),e[n]=t}function Xc(e,n){var t=e.pendingLanes&~n;e.pendingLanes=n,e.suspendedLanes=0,e.pingedLanes=0,e.expiredLanes&=n,e.mutableReadLanes&=n,e.entangledLanes&=n,n=e.entanglements;var r=e.eventTimes;for(e=e.expirationTimes;0=Ct),qu=String.fromCharCode(32),bu=!1;function js(e,n){switch(e){case"keyup":return xf.indexOf(n.keyCode)!==-1;case"keydown":return n.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function Is(e){return e=e.detail,typeof 
e=="object"&&"data"in e?e.data:null}var jn=!1;function Nf(e,n){switch(e){case"compositionend":return Is(n);case"keypress":return n.which!==32?null:(bu=!0,qu);case"textInput":return e=n.data,e===qu&&bu?null:e;default:return null}}function Pf(e,n){if(jn)return e==="compositionend"||!tu&&js(e,n)?(e=Os(),Sr=bo=en=null,jn=!1,e):null;switch(e){case"paste":return null;case"keypress":if(!(n.ctrlKey||n.altKey||n.metaKey)||n.ctrlKey&&n.altKey){if(n.char&&1=n)return{node:t,offset:n-e};e=r}e:{for(;t;){if(t.nextSibling){t=t.nextSibling;break e}t=t.parentNode}t=void 0}t=ri(t)}}function Us(e,n){return e&&n?e===n?!0:e&&e.nodeType===3?!1:n&&n.nodeType===3?Us(e,n.parentNode):"contains"in e?e.contains(n):e.compareDocumentPosition?!!(e.compareDocumentPosition(n)&16):!1:!1}function $s(){for(var e=window,n=Lr();n instanceof e.HTMLIFrameElement;){try{var t=typeof n.contentWindow.location.href=="string"}catch{t=!1}if(t)e=n.contentWindow;else break;n=Lr(e.document)}return n}function ru(e){var n=e&&e.nodeName&&e.nodeName.toLowerCase();return n&&(n==="input"&&(e.type==="text"||e.type==="search"||e.type==="tel"||e.type==="url"||e.type==="password")||n==="textarea"||e.contentEditable==="true")}function If(e){var n=$s(),t=e.focusedElem,r=e.selectionRange;if(n!==t&&t&&t.ownerDocument&&Us(t.ownerDocument.documentElement,t)){if(r!==null&&ru(t)){if(n=r.start,e=r.end,e===void 0&&(e=n),"selectionStart"in t)t.selectionStart=n,t.selectionEnd=Math.min(e,t.value.length);else if(e=(n=t.ownerDocument||document)&&n.defaultView||window,e.getSelection){e=e.getSelection();var l=t.textContent.length,o=Math.min(r.start,l);r=r.end===void 0?o:Math.min(r.end,l),!e.extend&&o>r&&(l=r,r=o,o=l),l=li(t,o);var 
u=li(t,r);l&&u&&(e.rangeCount!==1||e.anchorNode!==l.node||e.anchorOffset!==l.offset||e.focusNode!==u.node||e.focusOffset!==u.offset)&&(n=n.createRange(),n.setStart(l.node,l.offset),e.removeAllRanges(),o>r?(e.addRange(n),e.extend(u.node,u.offset)):(n.setEnd(u.node,u.offset),e.addRange(n)))}}for(n=[],e=t;e=e.parentNode;)e.nodeType===1&&n.push({element:e,left:e.scrollLeft,top:e.scrollTop});for(typeof t.focus=="function"&&t.focus(),t=0;t=document.documentMode,In=null,ao=null,_t=null,co=!1;function oi(e,n,t){var r=t.window===t?t.document:t.nodeType===9?t:t.ownerDocument;co||In==null||In!==Lr(r)||(r=In,"selectionStart"in r&&ru(r)?r={start:r.selectionStart,end:r.selectionEnd}:(r=(r.ownerDocument&&r.ownerDocument.defaultView||window).getSelection(),r={anchorNode:r.anchorNode,anchorOffset:r.anchorOffset,focusNode:r.focusNode,focusOffset:r.focusOffset}),_t&&Vt(_t,r)||(_t=r,r=Ir(ao,"onSelect"),0Hn||(e.current=yo[Hn],yo[Hn]=null,Hn--)}function O(e,n){Hn++,yo[Hn]=e.current,e.current=n}var dn={},le=mn(dn),fe=mn(!1),_n=dn;function qn(e,n){var t=e.type.contextTypes;if(!t)return dn;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===n)return r.__reactInternalMemoizedMaskedChildContext;var l={},o;for(o in t)l[o]=n[o];return r&&(e=e.stateNode,e.__reactInternalMemoizedUnmaskedChildContext=n,e.__reactInternalMemoizedMaskedChildContext=l),l}function de(e){return e=e.childContextTypes,e!=null}function Fr(){j(fe),j(le)}function di(e,n,t){if(le.current!==dn)throw Error(y(168));O(le,n),O(fe,t)}function Gs(e,n,t){var r=e.stateNode;if(n=n.childContextTypes,typeof r.getChildContext!="function")return t;r=r.getChildContext();for(var l in r)if(!(l in n))throw Error(y(108,Tc(e)||"Unknown",l));return U({},t,r)}function Hr(e){return e=(e=e.stateNode)&&e.__reactInternalMemoizedMergedChildContext||dn,_n=le.current,O(le,e),O(fe,fe.current),!0}function pi(e,n,t){var r=e.stateNode;if(!r)throw 
Error(y(169));t?(e=Gs(e,n,_n),r.__reactInternalMemoizedMergedChildContext=e,j(fe),j(le),O(le,e)):j(fe),O(fe,t)}var Ue=null,ol=!1,jl=!1;function Js(e){Ue===null?Ue=[e]:Ue.push(e)}function Zf(e){ol=!0,Js(e)}function hn(){if(!jl&&Ue!==null){jl=!0;var e=0,n=M;try{var t=Ue;for(M=1;e>=u,l-=u,$e=1<<32-Re(n)+l|t<N?(B=_,_=null):B=_.sibling;var T=p(f,_,d[N],v);if(T===null){_===null&&(_=B);break}e&&_&&T.alternate===null&&n(f,_),a=o(T,a,N),x===null?E=T:x.sibling=T,x=T,_=B}if(N===d.length)return t(f,_),V&&gn(f,N),E;if(_===null){for(;NN?(B=_,_=null):B=_.sibling;var Ne=p(f,_,T.value,v);if(Ne===null){_===null&&(_=B);break}e&&_&&Ne.alternate===null&&n(f,_),a=o(Ne,a,N),x===null?E=Ne:x.sibling=Ne,x=Ne,_=B}if(T.done)return t(f,_),V&&gn(f,N),E;if(_===null){for(;!T.done;N++,T=d.next())T=m(f,T.value,v),T!==null&&(a=o(T,a,N),x===null?E=T:x.sibling=T,x=T);return V&&gn(f,N),E}for(_=r(f,_);!T.done;N++,T=d.next())T=g(_,f,N,T.value,v),T!==null&&(e&&T.alternate!==null&&_.delete(T.key===null?N:T.key),a=o(T,a,N),x===null?E=T:x.sibling=T,x=T);return e&&_.forEach(function(it){return n(f,it)}),V&&gn(f,N),E}function I(f,a,d,v){if(typeof d=="object"&&d!==null&&d.type===Dn&&d.key===null&&(d=d.props.children),typeof d=="object"&&d!==null){switch(d.$$typeof){case nr:e:{for(var E=d.key,x=a;x!==null;){if(x.key===E){if(E=d.type,E===Dn){if(x.tag===7){t(f,x.sibling),a=l(x,d.props.children),a.return=f,f=a;break e}}else if(x.elementType===E||typeof E=="object"&&E!==null&&E.$$typeof===Ge&&Si(E)===x.type){t(f,x.sibling),a=l(x,d.props),a.ref=mt(f,x,d),a.return=f,f=a;break e}t(f,x);break}else n(f,x);x=x.sibling}d.type===Dn?(a=xn(d.props.children,f.mode,v,d.key),a.return=f,f=a):(v=zr(d.type,d.key,d.props,null,f.mode,v),v.ref=mt(f,a,d),v.return=f,f=v)}return u(f);case On:e:{for(x=d.key;a!==null;){if(a.key===x)if(a.tag===4&&a.stateNode.containerInfo===d.containerInfo&&a.stateNode.implementation===d.implementation){t(f,a.sibling),a=l(a,d.children||[]),a.return=f,f=a;break e}else{t(f,a);break}else 
n(f,a);a=a.sibling}a=Bl(d,f.mode,v),a.return=f,f=a}return u(f);case Ge:return x=d._init,I(f,a,x(d._payload),v)}if(gt(d))return w(f,a,d,v);if(at(d))return S(f,a,d,v);dr(f,d)}return typeof d=="string"&&d!==""||typeof d=="number"?(d=""+d,a!==null&&a.tag===6?(t(f,a.sibling),a=l(a,d),a.return=f,f=a):(t(f,a),a=Al(d,f.mode,v),a.return=f,f=a),u(f)):t(f,a)}return I}var et=oa(!0),ua=oa(!1),Jt={},Fe=mn(Jt),$t=mn(Jt),At=mn(Jt);function En(e){if(e===Jt)throw Error(y(174));return e}function du(e,n){switch(O(At,n),O($t,e),O(Fe,Jt),e=n.nodeType,e){case 9:case 11:n=(n=n.documentElement)?n.namespaceURI:bl(null,"");break;default:e=e===8?n.parentNode:n,n=e.namespaceURI||null,e=e.tagName,n=bl(n,e)}j(Fe),O(Fe,n)}function nt(){j(Fe),j($t),j(At)}function ia(e){En(At.current);var n=En(Fe.current),t=bl(n,e.type);n!==t&&(O($t,e),O(Fe,t))}function pu(e){$t.current===e&&(j(Fe),j($t))}var F=mn(0);function Qr(e){for(var n=e;n!==null;){if(n.tag===13){var t=n.memoizedState;if(t!==null&&(t=t.dehydrated,t===null||t.data==="$?"||t.data==="$!"))return n}else if(n.tag===19&&n.memoizedProps.revealOrder!==void 0){if(n.flags&128)return n}else if(n.child!==null){n.child.return=n,n=n.child;continue}if(n===e)break;for(;n.sibling===null;){if(n.return===null||n.return===e)return null;n=n.return}n.sibling.return=n.return,n=n.sibling}return null}var Il=[];function mu(){for(var e=0;et?t:4,e(!0);var r=Vl.transition;Vl.transition={};try{e(!1),n()}finally{M=t,Vl.transition=r}}function Ca(){return _e().memoizedState}function qf(e,n,t){var r=an(e);if(t={lane:r,action:t,hasEagerState:!1,eagerState:null,next:null},xa(e))_a(n,t);else if(t=na(e,n,t,r),t!==null){var l=ue();Me(t,e,r,l),Na(t,n,r)}}function bf(e,n,t){var r=an(e),l={lane:r,action:t,hasEagerState:!1,eagerState:null,next:null};if(xa(e))_a(n,l);else{var o=e.alternate;if(e.lanes===0&&(o===null||o.lanes===0)&&(o=n.lastRenderedReducer,o!==null))try{var u=n.lastRenderedState,i=o(u,t);if(l.hasEagerState=!0,l.eagerState=i,Oe(i,u)){var 
s=n.interleaved;s===null?(l.next=l,cu(n)):(l.next=s.next,s.next=l),n.interleaved=l;return}}catch{}finally{}t=na(e,n,l,r),t!==null&&(l=ue(),Me(t,e,r,l),Na(t,n,r))}}function xa(e){var n=e.alternate;return e===H||n!==null&&n===H}function _a(e,n){Nt=Kr=!0;var t=e.pending;t===null?n.next=n:(n.next=t.next,t.next=n),e.pending=n}function Na(e,n,t){if(t&4194240){var r=n.lanes;r&=e.pendingLanes,t|=r,n.lanes=t,Go(e,t)}}var Yr={readContext:xe,useCallback:ne,useContext:ne,useEffect:ne,useImperativeHandle:ne,useInsertionEffect:ne,useLayoutEffect:ne,useMemo:ne,useReducer:ne,useRef:ne,useState:ne,useDebugValue:ne,useDeferredValue:ne,useTransition:ne,useMutableSource:ne,useSyncExternalStore:ne,useId:ne,unstable_isNewReconciler:!1},ed={readContext:xe,useCallback:function(e,n){return je().memoizedState=[e,n===void 0?null:n],e},useContext:xe,useEffect:Ei,useImperativeHandle:function(e,n,t){return t=t!=null?t.concat([e]):null,xr(4194308,4,ga.bind(null,n,e),t)},useLayoutEffect:function(e,n){return xr(4194308,4,e,n)},useInsertionEffect:function(e,n){return xr(4,2,e,n)},useMemo:function(e,n){var t=je();return n=n===void 0?null:n,e=e(),t.memoizedState=[e,n],e},useReducer:function(e,n,t){var r=je();return n=t!==void 0?t(n):n,r.memoizedState=r.baseState=n,e={pending:null,interleaved:null,lanes:0,dispatch:null,lastRenderedReducer:e,lastRenderedState:n},r.queue=e,e=e.dispatch=qf.bind(null,H,e),[r.memoizedState,e]},useRef:function(e){var n=je();return e={current:e},n.memoizedState=e},useState:ki,useDebugValue:wu,useDeferredValue:function(e){return je().memoizedState=e},useTransition:function(){var e=ki(!1),n=e[0];return e=Jf.bind(null,e[1]),je().memoizedState=e,[n,e]},useMutableSource:function(){},useSyncExternalStore:function(e,n,t){var r=H,l=je();if(V){if(t===void 0)throw Error(y(407));t=t()}else{if(t=n(),J===null)throw Error(y(349));Pn&30||ca(r,n,t)}l.memoizedState=t;var o={value:t,getSnapshot:n};return l.queue=o,Ei(da.bind(null,r,o,e),[e]),r.flags|=2048,Qt(9,fa.bind(null,r,o,t,n),void 
0,null),t},useId:function(){var e=je(),n=J.identifierPrefix;if(V){var t=Ae,r=$e;t=(r&~(1<<32-Re(r)-1)).toString(32)+t,n=":"+n+"R"+t,t=Bt++,0<\/script>",e=e.removeChild(e.firstChild)):typeof r.is=="string"?e=u.createElement(t,{is:r.is}):(e=u.createElement(t),t==="select"&&(u=e,r.multiple?u.multiple=!0:r.size&&(u.size=r.size))):e=u.createElementNS(e,t),e[Ie]=n,e[Ut]=r,ja(e,n,!1,!1),n.stateNode=e;e:{switch(u=no(t,r),t){case"dialog":D("cancel",e),D("close",e),l=r;break;case"iframe":case"object":case"embed":D("load",e),l=r;break;case"video":case"audio":for(l=0;lrt&&(n.flags|=128,r=!0,ht(o,!1),n.lanes=4194304)}else{if(!r)if(e=Qr(u),e!==null){if(n.flags|=128,r=!0,t=e.updateQueue,t!==null&&(n.updateQueue=t,n.flags|=4),ht(o,!0),o.tail===null&&o.tailMode==="hidden"&&!u.alternate&&!V)return te(n),null}else 2*Q()-o.renderingStartTime>rt&&t!==1073741824&&(n.flags|=128,r=!0,ht(o,!1),n.lanes=4194304);o.isBackwards?(u.sibling=n.child,n.child=u):(t=o.last,t!==null?t.sibling=u:n.child=u,o.last=u)}return o.tail!==null?(n=o.tail,o.rendering=n,o.tail=n.sibling,o.renderingStartTime=Q(),n.sibling=null,t=F.current,O(F,r?t&1|2:t&1),n):(te(n),null);case 22:case 23:return _u(),r=n.memoizedState!==null,e!==null&&e.memoizedState!==null!==r&&(n.flags|=8192),r&&n.mode&1?me&1073741824&&(te(n),n.subtreeFlags&6&&(n.flags|=8192)):te(n),null;case 24:return null;case 25:return null}throw Error(y(156,n.tag))}function sd(e,n){switch(ou(n),n.tag){case 1:return de(n.type)&&Fr(),e=n.flags,e&65536?(n.flags=e&-65537|128,n):null;case 3:return nt(),j(fe),j(le),mu(),e=n.flags,e&65536&&!(e&128)?(n.flags=e&-65537|128,n):null;case 5:return pu(n),null;case 13:if(j(F),e=n.memoizedState,e!==null&&e.dehydrated!==null){if(n.alternate===null)throw Error(y(340));bn()}return e=n.flags,e&65536?(n.flags=e&-65537|128,n):null;case 19:return j(F),null;case 4:return nt(),null;case 10:return au(n.type._context),null;case 22:case 23:return _u(),null;case 24:return null;default:return null}}var mr=!1,re=!1,ad=typeof 
WeakSet=="function"?WeakSet:Set,k=null;function Bn(e,n){var t=e.ref;if(t!==null)if(typeof t=="function")try{t(null)}catch(r){A(e,n,r)}else t.current=null}function Lo(e,n,t){try{t()}catch(r){A(e,n,r)}}var Ri=!1;function cd(e,n){if(fo=Dr,e=$s(),ru(e)){if("selectionStart"in e)var t={start:e.selectionStart,end:e.selectionEnd};else e:{t=(t=e.ownerDocument)&&t.defaultView||window;var r=t.getSelection&&t.getSelection();if(r&&r.rangeCount!==0){t=r.anchorNode;var l=r.anchorOffset,o=r.focusNode;r=r.focusOffset;try{t.nodeType,o.nodeType}catch{t=null;break e}var u=0,i=-1,s=-1,c=0,h=0,m=e,p=null;n:for(;;){for(var g;m!==t||l!==0&&m.nodeType!==3||(i=u+l),m!==o||r!==0&&m.nodeType!==3||(s=u+r),m.nodeType===3&&(u+=m.nodeValue.length),(g=m.firstChild)!==null;)p=m,m=g;for(;;){if(m===e)break n;if(p===t&&++c===l&&(i=u),p===o&&++h===r&&(s=u),(g=m.nextSibling)!==null)break;m=p,p=m.parentNode}m=g}t=i===-1||s===-1?null:{start:i,end:s}}else t=null}t=t||{start:0,end:0}}else t=null;for(po={focusedElem:e,selectionRange:t},Dr=!1,k=n;k!==null;)if(n=k,e=n.child,(n.subtreeFlags&1028)!==0&&e!==null)e.return=n,k=e;else for(;k!==null;){n=k;try{var w=n.alternate;if(n.flags&1024)switch(n.tag){case 0:case 11:case 15:break;case 1:if(w!==null){var S=w.memoizedProps,I=w.memoizedState,f=n.stateNode,a=f.getSnapshotBeforeUpdate(n.elementType===n.type?S:ze(n.type,S),I);f.__reactInternalSnapshotBeforeUpdate=a}break;case 3:var d=n.stateNode.containerInfo;d.nodeType===1?d.textContent="":d.nodeType===9&&d.documentElement&&d.removeChild(d.documentElement);break;case 5:case 6:case 4:case 17:break;default:throw Error(y(163))}}catch(v){A(n,n.return,v)}if(e=n.sibling,e!==null){e.return=n.return,k=e;break}k=n.return}return w=Ri,Ri=!1,w}function Pt(e,n,t){var r=n.updateQueue;if(r=r!==null?r.lastEffect:null,r!==null){var l=r=r.next;do{if((l.tag&e)===e){var o=l.destroy;l.destroy=void 0,o!==void 0&&Lo(n,t,o)}l=l.next}while(l!==r)}}function sl(e,n){if(n=n.updateQueue,n=n!==null?n.lastEffect:null,n!==null){var 
t=n=n.next;do{if((t.tag&e)===e){var r=t.create;t.destroy=r()}t=t.next}while(t!==n)}}function To(e){var n=e.ref;if(n!==null){var t=e.stateNode;switch(e.tag){case 5:e=t;break;default:e=t}typeof n=="function"?n(e):n.current=e}}function Fa(e){var n=e.alternate;n!==null&&(e.alternate=null,Fa(n)),e.child=null,e.deletions=null,e.sibling=null,e.tag===5&&(n=e.stateNode,n!==null&&(delete n[Ie],delete n[Ut],delete n[vo],delete n[Kf],delete n[Yf])),e.stateNode=null,e.return=null,e.dependencies=null,e.memoizedProps=null,e.memoizedState=null,e.pendingProps=null,e.stateNode=null,e.updateQueue=null}function Ha(e){return e.tag===5||e.tag===3||e.tag===4}function Mi(e){e:for(;;){for(;e.sibling===null;){if(e.return===null||Ha(e.return))return null;e=e.return}for(e.sibling.return=e.return,e=e.sibling;e.tag!==5&&e.tag!==6&&e.tag!==18;){if(e.flags&2||e.child===null||e.tag===4)continue e;e.child.return=e,e=e.child}if(!(e.flags&2))return e.stateNode}}function Ro(e,n,t){var r=e.tag;if(r===5||r===6)e=e.stateNode,n?t.nodeType===8?t.parentNode.insertBefore(e,n):t.insertBefore(e,n):(t.nodeType===8?(n=t.parentNode,n.insertBefore(e,t)):(n=t,n.appendChild(e)),t=t._reactRootContainer,t!=null||n.onclick!==null||(n.onclick=Vr));else if(r!==4&&(e=e.child,e!==null))for(Ro(e,n,t),e=e.sibling;e!==null;)Ro(e,n,t),e=e.sibling}function Mo(e,n,t){var r=e.tag;if(r===5||r===6)e=e.stateNode,n?t.insertBefore(e,n):t.appendChild(e);else if(r!==4&&(e=e.child,e!==null))for(Mo(e,n,t),e=e.sibling;e!==null;)Mo(e,n,t),e=e.sibling}var q=null,Le=!1;function Xe(e,n,t){for(t=t.child;t!==null;)Ua(e,n,t),t=t.sibling}function Ua(e,n,t){if(Ve&&typeof Ve.onCommitFiberUnmount=="function")try{Ve.onCommitFiberUnmount(el,t)}catch{}switch(t.tag){case 5:re||Bn(t,n);case 6:var r=q,l=Le;q=null,Xe(e,n,t),q=r,Le=l,q!==null&&(Le?(e=q,t=t.stateNode,e.nodeType===8?e.parentNode.removeChild(t):e.removeChild(t)):q.removeChild(t.stateNode));break;case 
18:q!==null&&(Le?(e=q,t=t.stateNode,e.nodeType===8?Dl(e.parentNode,t):e.nodeType===1&&Dl(e,t),jt(e)):Dl(q,t.stateNode));break;case 4:r=q,l=Le,q=t.stateNode.containerInfo,Le=!0,Xe(e,n,t),q=r,Le=l;break;case 0:case 11:case 14:case 15:if(!re&&(r=t.updateQueue,r!==null&&(r=r.lastEffect,r!==null))){l=r=r.next;do{var o=l,u=o.destroy;o=o.tag,u!==void 0&&(o&2||o&4)&&Lo(t,n,u),l=l.next}while(l!==r)}Xe(e,n,t);break;case 1:if(!re&&(Bn(t,n),r=t.stateNode,typeof r.componentWillUnmount=="function"))try{r.props=t.memoizedProps,r.state=t.memoizedState,r.componentWillUnmount()}catch(i){A(t,n,i)}Xe(e,n,t);break;case 21:Xe(e,n,t);break;case 22:t.mode&1?(re=(r=re)||t.memoizedState!==null,Xe(e,n,t),re=r):Xe(e,n,t);break;default:Xe(e,n,t)}}function Oi(e){var n=e.updateQueue;if(n!==null){e.updateQueue=null;var t=e.stateNode;t===null&&(t=e.stateNode=new ad),n.forEach(function(r){var l=wd.bind(null,e,r);t.has(r)||(t.add(r),r.then(l,l))})}}function Pe(e,n){var t=n.deletions;if(t!==null)for(var r=0;rl&&(l=u),r&=~o}if(r=l,r=Q()-r,r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*dd(r/1960))-r,10e?16:e,nn===null)var r=!1;else{if(e=nn,nn=null,Gr=0,R&6)throw Error(y(331));var l=R;for(R|=4,k=e.current;k!==null;){var o=k,u=o.child;if(k.flags&16){var i=o.deletions;if(i!==null){for(var s=0;sQ()-Cu?Cn(e,0):Eu|=t),pe(e,n)}function Za(e,n){n===0&&(e.mode&1?(n=or,or<<=1,!(or&130023424)&&(or=4194304)):n=1);var t=ue();e=Ke(e,n),e!==null&&(Zt(e,n,t),pe(e,t))}function gd(e){var n=e.memoizedState,t=0;n!==null&&(t=n.retryLane),Za(e,t)}function wd(e,n){var t=0;switch(e.tag){case 13:var r=e.stateNode,l=e.memoizedState;l!==null&&(t=l.retryLane);break;case 19:r=e.stateNode;break;default:throw Error(y(314))}r!==null&&r.delete(n),Za(e,t)}var Xa;Xa=function(e,n,t){if(e!==null)if(e.memoizedProps!==n.pendingProps||fe.current)ce=!0;else{if(!(e.lanes&t)&&!(n.flags&128))return ce=!1,ud(e,n,t);ce=!!(e.flags&131072)}else ce=!1,V&&n.flags&1048576&&qs(n,$r,n.index);switch(n.lanes=0,n.tag){case 2:var 
r=n.type;_r(e,n),e=n.pendingProps;var l=qn(n,le.current);Xn(n,t),l=vu(null,n,r,e,l,t);var o=yu();return n.flags|=1,typeof l=="object"&&l!==null&&typeof l.render=="function"&&l.$$typeof===void 0?(n.tag=1,n.memoizedState=null,n.updateQueue=null,de(r)?(o=!0,Hr(n)):o=!1,n.memoizedState=l.state!==null&&l.state!==void 0?l.state:null,fu(n),l.updater=ul,n.stateNode=l,l._reactInternals=n,Eo(n,r,e,t),n=_o(null,n,r,!0,o,t)):(n.tag=0,V&&o&&lu(n),oe(null,n,l,t),n=n.child),n;case 16:r=n.elementType;e:{switch(_r(e,n),e=n.pendingProps,l=r._init,r=l(r._payload),n.type=r,l=n.tag=kd(r),e=ze(r,e),l){case 0:n=xo(null,n,r,e,t);break e;case 1:n=zi(null,n,r,e,t);break e;case 11:n=Ni(null,n,r,e,t);break e;case 14:n=Pi(null,n,r,ze(r.type,e),t);break e}throw Error(y(306,r,""))}return n;case 0:return r=n.type,l=n.pendingProps,l=n.elementType===r?l:ze(r,l),xo(e,n,r,l,t);case 1:return r=n.type,l=n.pendingProps,l=n.elementType===r?l:ze(r,l),zi(e,n,r,l,t);case 3:e:{if(Ma(n),e===null)throw Error(y(387));r=n.pendingProps,o=n.memoizedState,l=o.element,ta(e,n),Wr(n,r,null,t);var u=n.memoizedState;if(r=u.element,o.isDehydrated)if(o={element:r,isDehydrated:!1,cache:u.cache,pendingSuspenseBoundaries:u.pendingSuspenseBoundaries,transitions:u.transitions},n.updateQueue.baseState=o,n.memoizedState=o,n.flags&256){l=tt(Error(y(423)),n),n=Li(e,n,r,t,l);break e}else if(r!==l){l=tt(Error(y(424)),n),n=Li(e,n,r,t,l);break e}else for(he=on(n.stateNode.containerInfo.firstChild),ve=n,V=!0,Te=null,t=ua(n,null,r,t),n.child=t;t;)t.flags=t.flags&-3|4096,t=t.sibling;else{if(bn(),r===l){n=Ye(e,n,t);break e}oe(e,n,r,t)}n=n.child}return n;case 5:return ia(n),e===null&&wo(n),r=n.type,l=n.pendingProps,o=e!==null?e.memoizedProps:null,u=l.children,mo(r,l)?u=null:o!==null&&mo(r,o)&&(n.flags|=32),Ra(e,n),oe(e,n,u,t),n.child;case 6:return e===null&&wo(n),null;case 13:return Oa(e,n,t);case 4:return du(n,n.stateNode.containerInfo),r=n.pendingProps,e===null?n.child=et(n,null,r,t):oe(e,n,r,t),n.child;case 11:return 
r=n.type,l=n.pendingProps,l=n.elementType===r?l:ze(r,l),Ni(e,n,r,l,t);case 7:return oe(e,n,n.pendingProps,t),n.child;case 8:return oe(e,n,n.pendingProps.children,t),n.child;case 12:return oe(e,n,n.pendingProps.children,t),n.child;case 10:e:{if(r=n.type._context,l=n.pendingProps,o=n.memoizedProps,u=l.value,O(Ar,r._currentValue),r._currentValue=u,o!==null)if(Oe(o.value,u)){if(o.children===l.children&&!fe.current){n=Ye(e,n,t);break e}}else for(o=n.child,o!==null&&(o.return=n);o!==null;){var i=o.dependencies;if(i!==null){u=o.child;for(var s=i.firstContext;s!==null;){if(s.context===r){if(o.tag===1){s=Be(-1,t&-t),s.tag=2;var c=o.updateQueue;if(c!==null){c=c.shared;var h=c.pending;h===null?s.next=s:(s.next=h.next,h.next=s),c.pending=s}}o.lanes|=t,s=o.alternate,s!==null&&(s.lanes|=t),So(o.return,t,n),i.lanes|=t;break}s=s.next}}else if(o.tag===10)u=o.type===n.type?null:o.child;else if(o.tag===18){if(u=o.return,u===null)throw Error(y(341));u.lanes|=t,i=u.alternate,i!==null&&(i.lanes|=t),So(u,t,n),u=o.sibling}else u=o.child;if(u!==null)u.return=o;else for(u=o;u!==null;){if(u===n){u=null;break}if(o=u.sibling,o!==null){o.return=u.return,u=o;break}u=u.return}o=u}oe(e,n,l.children,t),n=n.child}return n;case 9:return l=n.type,r=n.pendingProps.children,Xn(n,t),l=xe(l),r=r(l),n.flags|=1,oe(e,n,r,t),n.child;case 14:return r=n.type,l=ze(r,n.pendingProps),l=ze(r.type,l),Pi(e,n,r,l,t);case 15:return La(e,n,n.type,n.pendingProps,t);case 17:return r=n.type,l=n.pendingProps,l=n.elementType===r?l:ze(r,l),_r(e,n),n.tag=1,de(r)?(e=!0,Hr(n)):e=!1,Xn(n,t),la(n,r,l),Eo(n,r,l,t),_o(null,n,r,!0,e,t);case 19:return Da(e,n,t);case 22:return Ta(e,n,t)}throw Error(y(156,n.tag))};function Ga(e,n){return Es(e,n)}function 
Sd(e,n,t,r){this.tag=e,this.key=t,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=n,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function Ee(e,n,t,r){return new Sd(e,n,t,r)}function Pu(e){return e=e.prototype,!(!e||!e.isReactComponent)}function kd(e){if(typeof e=="function")return Pu(e)?1:0;if(e!=null){if(e=e.$$typeof,e===Ko)return 11;if(e===Yo)return 14}return 2}function cn(e,n){var t=e.alternate;return t===null?(t=Ee(e.tag,n,e.key,e.mode),t.elementType=e.elementType,t.type=e.type,t.stateNode=e.stateNode,t.alternate=e,e.alternate=t):(t.pendingProps=n,t.type=e.type,t.flags=0,t.subtreeFlags=0,t.deletions=null),t.flags=e.flags&14680064,t.childLanes=e.childLanes,t.lanes=e.lanes,t.child=e.child,t.memoizedProps=e.memoizedProps,t.memoizedState=e.memoizedState,t.updateQueue=e.updateQueue,n=e.dependencies,t.dependencies=n===null?null:{lanes:n.lanes,firstContext:n.firstContext},t.sibling=e.sibling,t.index=e.index,t.ref=e.ref,t}function zr(e,n,t,r,l,o){var u=2;if(r=e,typeof e=="function")Pu(e)&&(u=1);else if(typeof e=="string")u=5;else e:switch(e){case Dn:return xn(t.children,l,o,n);case Qo:u=8,l|=8;break;case Ql:return e=Ee(12,t,n,l|2),e.elementType=Ql,e.lanes=o,e;case Kl:return e=Ee(13,t,n,l),e.elementType=Kl,e.lanes=o,e;case Yl:return e=Ee(19,t,n,l),e.elementType=Yl,e.lanes=o,e;case os:return cl(t,l,o,n);default:if(typeof e=="object"&&e!==null)switch(e.$$typeof){case rs:u=10;break e;case ls:u=9;break e;case Ko:u=11;break e;case Yo:u=14;break e;case Ge:u=16,r=null;break e}throw Error(y(130,e==null?e:typeof e,""))}return n=Ee(u,t,n,l),n.elementType=e,n.type=r,n.lanes=o,n}function xn(e,n,t,r){return e=Ee(7,e,r,n),e.lanes=t,e}function cl(e,n,t,r){return e=Ee(22,e,r,n),e.elementType=os,e.lanes=t,e.stateNode={isHidden:!1},e}function Al(e,n,t){return 
e=Ee(6,e,null,n),e.lanes=t,e}function Bl(e,n,t){return n=Ee(4,e.children!==null?e.children:[],e.key,n),n.lanes=t,n.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},n}function Ed(e,n,t,r,l){this.tag=n,this.containerInfo=e,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=Cl(0),this.expirationTimes=Cl(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=Cl(0),this.identifierPrefix=r,this.onRecoverableError=l,this.mutableSourceEagerHydrationData=null}function zu(e,n,t,r,l,o,u,i,s){return e=new Ed(e,n,t,i,s),n===1?(n=1,o===!0&&(n|=8)):n=0,o=Ee(3,null,null,n),e.current=o,o.stateNode=e,o.memoizedState={element:r,isDehydrated:t,cache:null,transitions:null,pendingSuspenseBoundaries:null},fu(o),e}function Cd(e,n,t){var r=3"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(ec)}catch(e){console.error(e)}}ec(),qi.exports=ge;var zd=qi.exports,nc,$i=zd;nc=$i.createRoot,$i.hydrateRoot;function Ld(){return $.jsxs("svg",{width:"137",height:"40",viewBox:"0 0 137 40",fill:"none",xmlns:"http://www.w3.org/2000/svg",children:[$.jsx("path",{d:"M24.1831 16.0007H16.1225V24.0004H24.1831V16.0007Z",fill:"#161616"}),$.jsx("path",{d:"M32.2436 5.44985V0H8.06062V5.44985C8.06062 6.8587 6.91086 7.99978 5.4913 7.99978H0V32.0002H5.4913C6.91086 32.0002 8.06062 33.1413 8.06062 34.5502V40H32.2436V34.5502C32.2436 33.1413 33.3934 32.0002 34.8129 32.0002H40.3042V7.99978H34.8129C33.3934 7.99978 32.2436 6.8587 32.2436 5.44985ZM32.2436 29.4492C32.2436 30.858 31.0939 31.9991 29.6743 31.9991H10.6311C9.2115 31.9991 8.06174 30.858 8.06174 29.4492V10.5497C8.06174 9.14086 9.2115 7.99978 10.6311 7.99978H29.6743C31.0939 7.99978 32.2436 9.14086 32.2436 
10.5497V29.4492Z",fill:"#161616"}),$.jsx("path",{d:"M64.0092 7.99974H60.4546V31.9991H76.2523V28.6047H64.0092V7.99974Z",fill:"#161616"}),$.jsx("path",{d:"M86.5004 15.0661H85.2364C81.4368 15.0661 77.6035 17.3783 77.6035 22.5426V25.0525C77.6035 29.7335 80.3329 32.529 84.9039 32.529H86.834C90.6908 32.529 93.4348 30.2757 93.9979 26.6469L94.0472 26.3269H90.3863L90.3258 26.5247C89.784 28.3046 88.3678 29.1346 85.869 29.1346C82.6257 29.1346 81.0953 27.7047 81.0584 24.637H94.1334V22.5426C94.1334 17.3783 90.3001 15.0661 86.5004 15.0661ZM81.1636 21.6371C81.5263 19.386 82.9134 18.4605 85.8679 18.4605C88.8223 18.4605 90.2083 19.386 90.571 21.6371H81.1636Z",fill:"#161616"}),$.jsx("path",{d:"M101.226 7.99974H97.6722V15.0662H95.31V18.4606H97.6722V25.1837C97.6722 31.1135 101.307 31.9991 103.475 31.9991H105.717V28.6047H104.44C102.157 28.6047 101.226 27.4603 101.226 24.6559V18.4617H105.717V15.0673H101.226V7.99974Z",fill:"#161616"}),$.jsx("path",{d:"M113.234 7.99974H109.681V15.0662H107.318V18.4606H109.681V25.1837C109.681 31.1135 113.316 31.9991 115.483 31.9991H117.726V28.6047H116.448C114.165 28.6047 113.234 27.4603 113.234 24.6559V18.4617H117.726V15.0673H113.234V7.99974Z",fill:"#161616"}),$.jsx("path",{d:"M136.034 28.6046C135.33 28.6046 135.016 28.3135 135.016 27.6602V21.8815C135.016 15.9517 131.381 15.0661 129.214 15.0661H125.954C123.135 15.0661 120.118 17.115 120.118 20.1649V20.4426H123.671V20.1649C123.671 19.2249 124.83 18.4616 126.253 18.4616H128.249C130.799 18.4616 131.35 19.3727 131.452 21.4071H126.319C122.35 21.4071 119.684 23.5092 119.684 26.638V27.0014C119.684 28.6535 120.33 32.4967 126.319 32.4967C127.848 32.4967 130.52 32.2312 131.958 30.5379C132.829 32.0012 134.664 32.0012 136.034 32.0012H136.314V28.6069H136.034V28.6046ZM131.462 26.8014C131.462 28.6869 128.446 29.0991 127.283 29.0991C123.898 29.0991 123.237 28.2802 123.237 26.8669C123.237 25.2981 124.636 24.4692 127.283 24.4692H131.462V26.8014Z",fill:"#161616"})]})}function Td(){return 
$.jsx("svg",{width:"16",height:"13",viewBox:"0 0 16 13",fill:"none",xmlns:"http://www.w3.org/2000/svg",children:$.jsx("path",{d:"M14.4373 2.55366V5.21163H13.2678V3.332H12.4534V2.41123H11.4604V0H8.97894V1.94985H7.01906V0H4.53761V2.41123H3.54463V3.332H2.73019V5.21163H1.56068V2.55366H0V6.94885H0.850552V7.65697H1.7011V9.35807H3.96991V10.7222H2.48144V12.4774H4.4674V10.5978H6.52357V8.9669H9.47643V10.5978H11.5326V12.4774H13.5186V10.7222H12.0301V9.35807H14.2989V7.65697H15.1494V6.94885H16V2.55366H14.4393H14.4373ZM6.56971 7.12738H5.32798V5.001H6.56971V7.12738ZM10.668 7.12738H9.42628V5.001H10.668V7.12738Z",fill:"#FDFEFF"})})}function Rd(){return $.jsx("div",{className:"fixed bg-white w-[100dvw] p-0 h-[100dvh] flex items-center justify-center",children:$.jsxs("div",{className:"max-w-[893px] w-full border p-10 flex flex-col gap-5",children:[$.jsx(Ld,{}),$.jsxs("div",{className:"flex gap-2 text-black flex-col max-w-[600px]",children:[$.jsx("h1",{className:"font-semibold text-3xl",children:"Experience the new ADE"}),$.jsx("h3",{className:"text-lg",children:"We have launched the next-generation Agent Development Environment (ADE) for interacting with agents both in the cloud and locally."}),$.jsx("p",{className:"mt-10",children:"The old Letta chat UI is no longer supported past Letta version 0.5.0. 
To use the old chat interface, please downgrade your Letta version."}),$.jsx("div",{className:"flex mt-3",children:$.jsxs("a",{href:"https://app.letta.com",className:"bg-black flex gap-3 items-center px-4 py-3 text-white text-bold",children:[$.jsx(Td,{}),"Open the new ADE"]})})]})]})})}const Md=nc(document.getElementById("root"));Md.render($.jsx($o.StrictMode,{children:$.jsx(Rd,{})})); diff --git a/letta/server/static_files/assets/index-0e31b727.css b/letta/server/static_files/assets/index-0e31b727.css deleted file mode 100644 index c7362ffd..00000000 --- a/letta/server/static_files/assets/index-0e31b727.css +++ /dev/null @@ -1 +0,0 @@ -*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html,:host{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";font-feature-settings:normal;font-variation-settings:normal;-webkit-tap-highlight-color:transparent}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier 
New,monospace;font-feature-settings:normal;font-variation-settings:normal;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}:root{--background: 210, 10%, 92%;--background-lighter: 0, 0%, 100%;--background-darker: 210, 6%, 86%;--foreground: 224 71.4% 4.1%;--card: 0 0% 100%;--card-foreground: 224 71.4% 4.1%;--popover: 0 0% 100%;--popover-foreground: 224 71.4% 4.1%;--brand: 220.9 39.3% 11%;--brand-foreground: 210 20% 98%;--primary: 240, 92%, 35%;--primary-foreground: 0, 0%, 100%;--muted: 220 14.3% 95.9%;--muted-foreground: 220 8.9% 46.1%;--accent: 220 14.3% 
95.9%;--accent-foreground: 220.9 39.3% 11%;--destructive: 0 84.2% 60.2%;--destructive-foreground: 210 20% 98%;--border: 210, 6%, 86%;--input: 210, 6%, 86%;--ring: 224 71.4% 4.1%;--radius: .5rem}.dark{--background: 224 71.4% 4.1%;--background-lighter: 224 71.4% 4.1%;--background-darker: 224 71.4% 4.1%;--foreground: 210 20% 98%;--card: 224 71.4% 4.1%;--card-foreground: 210 20% 98%;--popover: 224 71.4% 4.1%;--popover-foreground: 210 20% 98%;--brand: 210 20% 98%;--brand-foreground: 220.9 39.3% 11%;--primary: 10, 100%, 60%;--primary-foreground: 210 20% 98%;--muted: 215 27.9% 16.9%;--muted-foreground: 217.9 10.6% 64.9%;--accent: 215 27.9% 16.9%;--accent-foreground: 210 20% 98%;--destructive: 0 62.8% 30.6%;--destructive-foreground: 210 20% 98%;--border: 215 27.9% 16.9%;--input: 215 27.9% 16.9%;--ring: 216 12.2% 83.9%}*{border-color:hsl(var(--border))}html{height:100%}body{height:100%;width:100%;background-color:hsl(var(--background));color:hsl(var(--foreground));-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}input::file-selector-button{color:hsl(var(--foreground))}*,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: 
;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.fixed{position:fixed}.mt-10{margin-top:2.5rem}.mt-3{margin-top:.75rem}.flex{display:flex}.h-\[100dvh\]{height:100dvh}.h-full{height:100%}.w-\[100dvw\]{width:100dvw}.w-full{width:100%}.max-w-\[600px\]{max-width:600px}.max-w-\[893px\]{max-width:893px}.flex-col{flex-direction:column}.items-center{align-items:center}.justify-center{justify-content:center}.gap-2{gap:.5rem}.gap-3{gap:.75rem}.gap-5{gap:1.25rem}.border{border-width:1px}.bg-black{--tw-bg-opacity: 1;background-color:rgb(0 0 0 / var(--tw-bg-opacity))}.bg-white{--tw-bg-opacity: 1;background-color:rgb(255 255 255 / 
var(--tw-bg-opacity))}.p-0{padding:0}.p-10{padding:2.5rem}.px-4{padding-left:1rem;padding-right:1rem}.py-3{padding-top:.75rem;padding-bottom:.75rem}.text-3xl{font-size:1.875rem;line-height:2.25rem}.text-lg{font-size:1.125rem;line-height:1.75rem}.font-semibold{font-weight:600}.text-black{--tw-text-opacity: 1;color:rgb(0 0 0 / var(--tw-text-opacity))}.text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity))}@keyframes enter{0%{opacity:var(--tw-enter-opacity, 1);transform:translate3d(var(--tw-enter-translate-x, 0),var(--tw-enter-translate-y, 0),0) scale3d(var(--tw-enter-scale, 1),var(--tw-enter-scale, 1),var(--tw-enter-scale, 1)) rotate(var(--tw-enter-rotate, 0))}}@keyframes exit{to{opacity:var(--tw-exit-opacity, 1);transform:translate3d(var(--tw-exit-translate-x, 0),var(--tw-exit-translate-y, 0),0) scale3d(var(--tw-exit-scale, 1),var(--tw-exit-scale, 1),var(--tw-exit-scale, 1)) rotate(var(--tw-exit-rotate, 0))}}.PopoverContent{width:var(--radix-popover-trigger-width);max-height:var(--radix-popover-content-available-height)} diff --git a/letta/server/static_files/favicon.ico b/letta/server/static_files/favicon.ico deleted file mode 100644 index a227115c..00000000 Binary files a/letta/server/static_files/favicon.ico and /dev/null differ diff --git a/letta/server/static_files/index.html b/letta/server/static_files/index.html deleted file mode 100644 index c7fb2c37..00000000 --- a/letta/server/static_files/index.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - Letta - - - - - - - - - - -
- - - diff --git a/letta/server/static_files/memgpt_logo_transparent.png b/letta/server/static_files/memgpt_logo_transparent.png deleted file mode 100644 index 92464439..00000000 Binary files a/letta/server/static_files/memgpt_logo_transparent.png and /dev/null differ diff --git a/letta/server/ws_api/example_client.py b/letta/server/ws_api/example_client.py index a7fc57b5..447600d3 100644 --- a/letta/server/ws_api/example_client.py +++ b/letta/server/ws_api/example_client.py @@ -3,6 +3,7 @@ import asyncio import websockets import letta.server.ws_api.protocol as protocol +from letta.helpers.json_helpers import json_dumps, json_loads from letta.server.constants import WS_CLIENT_TIMEOUT, WS_DEFAULT_PORT from letta.server.utils import condition_to_stop_receiving, print_server_response diff --git a/letta/server/ws_api/server.py b/letta/server/ws_api/server.py index 85edf515..80e2b369 100644 --- a/letta/server/ws_api/server.py +++ b/letta/server/ws_api/server.py @@ -1,11 +1,11 @@ import asyncio import signal import sys -import traceback import websockets import letta.server.ws_api.protocol as protocol +from letta.helpers.json_helpers import json_loads from letta.log import get_logger from letta.server.constants import WS_DEFAULT_PORT from letta.server.server import SyncServer @@ -53,7 +53,7 @@ class WebSocketServer: # Assuming the message is a JSON string try: data = json_loads(message) - except: + except Exception: print(f"[server] bad data from client:\n{data}") await websocket.send(protocol.server_command_response(f"Error: bad data from client - {str(data)}")) continue diff --git a/letta/services/agent_file_manager.py b/letta/services/agent_file_manager.py deleted file mode 100644 index 78f34b89..00000000 --- a/letta/services/agent_file_manager.py +++ /dev/null @@ -1,685 +0,0 @@ -from datetime import datetime, timezone -from typing import Dict, List - -from letta.errors import AgentFileExportError, AgentFileImportError -from letta.log import get_logger -from 
letta.schemas.agent import AgentState, CreateAgent -from letta.schemas.agent_file import ( - AgentFileSchema, - AgentSchema, - BlockSchema, - FileAgentSchema, - FileSchema, - GroupSchema, - ImportResult, - MessageSchema, - SourceSchema, - ToolSchema, -) -from letta.schemas.block import Block -from letta.schemas.file import FileMetadata -from letta.schemas.message import Message -from letta.schemas.source import Source -from letta.schemas.tool import Tool -from letta.schemas.user import User -from letta.services.agent_manager import AgentManager -from letta.services.block_manager import BlockManager -from letta.services.file_manager import FileManager -from letta.services.file_processor.embedder.base_embedder import BaseEmbedder -from letta.services.file_processor.file_processor import FileProcessor -from letta.services.file_processor.parser.mistral_parser import MistralFileParser -from letta.services.files_agents_manager import FileAgentManager -from letta.services.group_manager import GroupManager -from letta.services.mcp_manager import MCPManager -from letta.services.message_manager import MessageManager -from letta.services.source_manager import SourceManager -from letta.services.tool_manager import ToolManager -from letta.utils import get_latest_alembic_revision - -logger = get_logger(__name__) - - -class AgentFileManager: - """ - Manages export and import of agent files between database and AgentFileSchema format. 
- - Handles: - - ID mapping between database IDs and human-readable file IDs - - Coordination across multiple entity managers - - Transaction safety during imports - - Referential integrity validation - """ - - def __init__( - self, - agent_manager: AgentManager, - tool_manager: ToolManager, - source_manager: SourceManager, - block_manager: BlockManager, - group_manager: GroupManager, - mcp_manager: MCPManager, - file_manager: FileManager, - file_agent_manager: FileAgentManager, - message_manager: MessageManager, - embedder: BaseEmbedder, - file_parser: MistralFileParser, - using_pinecone: bool = False, - ): - self.agent_manager = agent_manager - self.tool_manager = tool_manager - self.source_manager = source_manager - self.block_manager = block_manager - self.group_manager = group_manager - self.mcp_manager = mcp_manager - self.file_manager = file_manager - self.file_agent_manager = file_agent_manager - self.message_manager = message_manager - self.embedder = embedder - self.file_parser = file_parser - self.using_pinecone = using_pinecone - - # ID mapping state for export - self._db_to_file_ids: Dict[str, str] = {} - - # Counters for generating Stripe-style IDs - self._id_counters: Dict[str, int] = { - AgentSchema.__id_prefix__: 0, - GroupSchema.__id_prefix__: 0, - BlockSchema.__id_prefix__: 0, - FileSchema.__id_prefix__: 0, - SourceSchema.__id_prefix__: 0, - ToolSchema.__id_prefix__: 0, - MessageSchema.__id_prefix__: 0, - FileAgentSchema.__id_prefix__: 0, - # MCPServerSchema.__id_prefix__: 0, - } - - def _reset_state(self): - """Reset internal state for a new operation""" - self._db_to_file_ids.clear() - for key in self._id_counters: - self._id_counters[key] = 0 - - def _generate_file_id(self, entity_type: str) -> str: - """Generate a Stripe-style ID for the given entity type""" - counter = self._id_counters[entity_type] - file_id = f"{entity_type}-{counter}" - self._id_counters[entity_type] += 1 - return file_id - - def _map_db_to_file_id(self, db_id: str, 
entity_type: str, allow_new: bool = True) -> str: - """Map a database UUID to a file ID, creating if needed (export only)""" - if db_id in self._db_to_file_ids: - return self._db_to_file_ids[db_id] - - if not allow_new: - raise AgentFileExportError( - f"Unexpected new {entity_type} ID '{db_id}' encountered during conversion. " - f"All IDs should have been mapped during agent processing." - ) - - file_id = self._generate_file_id(entity_type) - self._db_to_file_ids[db_id] = file_id - return file_id - - def _extract_unique_tools(self, agent_states: List[AgentState]) -> List: - """Extract unique tools across all agent states by ID""" - all_tools = [] - for agent_state in agent_states: - if agent_state.tools: - all_tools.extend(agent_state.tools) - - unique_tools = {} - for tool in all_tools: - unique_tools[tool.id] = tool - - return sorted(unique_tools.values(), key=lambda x: x.name) - - def _extract_unique_blocks(self, agent_states: List[AgentState]) -> List: - """Extract unique blocks across all agent states by ID""" - all_blocks = [] - for agent_state in agent_states: - if agent_state.memory and agent_state.memory.blocks: - all_blocks.extend(agent_state.memory.blocks) - - unique_blocks = {} - for block in all_blocks: - unique_blocks[block.id] = block - - return sorted(unique_blocks.values(), key=lambda x: x.label) - - async def _extract_unique_sources_and_files_from_agents( - self, agent_states: List[AgentState], actor: User, files_agents_cache: dict = None - ) -> tuple[List[Source], List[FileMetadata]]: - """Extract unique sources and files from agent states using bulk operations""" - - all_source_ids = set() - all_file_ids = set() - - for agent_state in agent_states: - files_agents = await self.file_agent_manager.list_files_for_agent( - agent_id=agent_state.id, actor=actor, is_open_only=False, return_as_blocks=False - ) - # cache the results for reuse during conversion - if files_agents_cache is not None: - files_agents_cache[agent_state.id] = files_agents - - for 
file_agent in files_agents: - all_source_ids.add(file_agent.source_id) - all_file_ids.add(file_agent.file_id) - sources = await self.source_manager.get_sources_by_ids_async(list(all_source_ids), actor) - files = await self.file_manager.get_files_by_ids_async(list(all_file_ids), actor, include_content=True) - - return sources, files - - async def _convert_agent_state_to_schema(self, agent_state: AgentState, actor: User, files_agents_cache: dict = None) -> AgentSchema: - """Convert AgentState to AgentSchema with ID remapping""" - - agent_file_id = self._map_db_to_file_id(agent_state.id, AgentSchema.__id_prefix__) - - # use cached file-agent data if available, otherwise fetch - if files_agents_cache is not None and agent_state.id in files_agents_cache: - files_agents = files_agents_cache[agent_state.id] - else: - files_agents = await self.file_agent_manager.list_files_for_agent( - agent_id=agent_state.id, actor=actor, is_open_only=False, return_as_blocks=False - ) - agent_schema = await AgentSchema.from_agent_state( - agent_state, message_manager=self.message_manager, files_agents=files_agents, actor=actor - ) - agent_schema.id = agent_file_id - - if agent_schema.messages: - for message in agent_schema.messages: - message_file_id = self._map_db_to_file_id(message.id, MessageSchema.__id_prefix__) - message.id = message_file_id - message.agent_id = agent_file_id - - if agent_schema.in_context_message_ids: - agent_schema.in_context_message_ids = [ - self._map_db_to_file_id(message_id, MessageSchema.__id_prefix__, allow_new=False) - for message_id in agent_schema.in_context_message_ids - ] - - if agent_schema.tool_ids: - agent_schema.tool_ids = [self._map_db_to_file_id(tool_id, ToolSchema.__id_prefix__) for tool_id in agent_schema.tool_ids] - - if agent_schema.source_ids: - agent_schema.source_ids = [ - self._map_db_to_file_id(source_id, SourceSchema.__id_prefix__) for source_id in agent_schema.source_ids - ] - - if agent_schema.block_ids: - agent_schema.block_ids = 
[self._map_db_to_file_id(block_id, BlockSchema.__id_prefix__) for block_id in agent_schema.block_ids] - - if agent_schema.files_agents: - for file_agent in agent_schema.files_agents: - file_agent.file_id = self._map_db_to_file_id(file_agent.file_id, FileSchema.__id_prefix__) - file_agent.source_id = self._map_db_to_file_id(file_agent.source_id, SourceSchema.__id_prefix__) - file_agent.agent_id = agent_file_id - - return agent_schema - - def _convert_tool_to_schema(self, tool) -> ToolSchema: - """Convert Tool to ToolSchema with ID remapping""" - tool_file_id = self._map_db_to_file_id(tool.id, ToolSchema.__id_prefix__, allow_new=False) - tool_schema = ToolSchema.from_tool(tool) - tool_schema.id = tool_file_id - return tool_schema - - def _convert_block_to_schema(self, block) -> BlockSchema: - """Convert Block to BlockSchema with ID remapping""" - block_file_id = self._map_db_to_file_id(block.id, BlockSchema.__id_prefix__, allow_new=False) - block_schema = BlockSchema.from_block(block) - block_schema.id = block_file_id - return block_schema - - def _convert_source_to_schema(self, source) -> SourceSchema: - """Convert Source to SourceSchema with ID remapping""" - source_file_id = self._map_db_to_file_id(source.id, SourceSchema.__id_prefix__, allow_new=False) - source_schema = SourceSchema.from_source(source) - source_schema.id = source_file_id - return source_schema - - def _convert_file_to_schema(self, file_metadata) -> FileSchema: - """Convert FileMetadata to FileSchema with ID remapping""" - file_file_id = self._map_db_to_file_id(file_metadata.id, FileSchema.__id_prefix__, allow_new=False) - file_schema = FileSchema.from_file_metadata(file_metadata) - file_schema.id = file_file_id - file_schema.source_id = self._map_db_to_file_id(file_metadata.source_id, SourceSchema.__id_prefix__, allow_new=False) - return file_schema - - async def export(self, agent_ids: List[str], actor: User) -> AgentFileSchema: - """ - Export agents and their related entities to AgentFileSchema 
format. - - Args: - agent_ids: List of agent UUIDs to export - - Returns: - AgentFileSchema with all related entities - - Raises: - AgentFileExportError: If export fails - """ - try: - self._reset_state() - - agent_states = await self.agent_manager.get_agents_by_ids_async(agent_ids=agent_ids, actor=actor) - - # Validate that all requested agents were found - if len(agent_states) != len(agent_ids): - found_ids = {agent.id for agent in agent_states} - missing_ids = [agent_id for agent_id in agent_ids if agent_id not in found_ids] - raise AgentFileExportError(f"The following agent IDs were not found: {missing_ids}") - - # cache for file-agent relationships to avoid duplicate queries - files_agents_cache = {} # Maps agent_id to list of file_agent relationships - - # Extract unique entities across all agents - tool_set = self._extract_unique_tools(agent_states) - block_set = self._extract_unique_blocks(agent_states) - - # Extract sources and files from agent states BEFORE conversion (with caching) - source_set, file_set = await self._extract_unique_sources_and_files_from_agents(agent_states, actor, files_agents_cache) - - # Convert to schemas with ID remapping (reusing cached file-agent data) - agent_schemas = [ - await self._convert_agent_state_to_schema(agent_state, actor=actor, files_agents_cache=files_agents_cache) - for agent_state in agent_states - ] - tool_schemas = [self._convert_tool_to_schema(tool) for tool in tool_set] - block_schemas = [self._convert_block_to_schema(block) for block in block_set] - source_schemas = [self._convert_source_to_schema(source) for source in source_set] - file_schemas = [self._convert_file_to_schema(file_metadata) for file_metadata in file_set] - - logger.info(f"Exporting {len(agent_ids)} agents to agent file format") - - # Return AgentFileSchema with converted entities - return AgentFileSchema( - agents=agent_schemas, - groups=[], # TODO: Extract and convert groups - blocks=block_schemas, - files=file_schemas, - 
sources=source_schemas, - tools=tool_schemas, - # mcp_servers=[], # TODO: Extract and convert MCP servers - metadata={"revision_id": await get_latest_alembic_revision()}, - created_at=datetime.now(timezone.utc), - ) - - except Exception as e: - logger.error(f"Failed to export agent file: {e}") - raise AgentFileExportError(f"Export failed: {e}") from e - - async def import_file(self, schema: AgentFileSchema, actor: User, dry_run: bool = False) -> ImportResult: - """ - Import AgentFileSchema into the database. - - Args: - schema: The agent file schema to import - dry_run: If True, validate but don't commit changes - - Returns: - ImportResult with success status and details - - Raises: - AgentFileImportError: If import fails - """ - try: - self._reset_state() - - if dry_run: - logger.info("Starting dry run import validation") - else: - logger.info("Starting agent file import") - - # Validate schema first - self._validate_schema(schema) - - if dry_run: - return ImportResult( - success=True, - message="Dry run validation passed", - imported_count=0, - ) - - # Import in dependency order - imported_count = 0 - file_to_db_ids = {} # Maps file IDs to new database IDs - # in-memory cache for file metadata to avoid repeated db calls - file_metadata_cache = {} # Maps database file ID to FileMetadata - - # 1. 
Create tools first (no dependencies) - using bulk upsert for efficiency - if schema.tools: - # convert tool schemas to pydantic tools - pydantic_tools = [] - for tool_schema in schema.tools: - pydantic_tools.append(Tool(**tool_schema.model_dump(exclude={"id"}))) - - # bulk upsert all tools at once - created_tools = await self.tool_manager.bulk_upsert_tools_async(pydantic_tools, actor) - - # map file ids to database ids - # note: tools are matched by name during upsert, so we need to match by name here too - created_tools_by_name = {tool.name: tool for tool in created_tools} - for tool_schema in schema.tools: - created_tool = created_tools_by_name.get(tool_schema.name) - if created_tool: - file_to_db_ids[tool_schema.id] = created_tool.id - imported_count += 1 - else: - logger.warning(f"Tool {tool_schema.name} was not created during bulk upsert") - - # 2. Create blocks (no dependencies) - using batch create for efficiency - if schema.blocks: - # convert block schemas to pydantic blocks (excluding IDs to create new blocks) - pydantic_blocks = [] - for block_schema in schema.blocks: - pydantic_blocks.append(Block(**block_schema.model_dump(exclude={"id"}))) - - # batch create all blocks at once - created_blocks = await self.block_manager.batch_create_blocks_async(pydantic_blocks, actor) - - # map file ids to database ids - for block_schema, created_block in zip(schema.blocks, created_blocks): - file_to_db_ids[block_schema.id] = created_block.id - imported_count += 1 - - # 3. 
Create sources (no dependencies) - using bulk upsert for efficiency - if schema.sources: - # convert source schemas to pydantic sources - pydantic_sources = [] - for source_schema in schema.sources: - source_data = source_schema.model_dump(exclude={"id", "embedding", "embedding_chunk_size"}) - pydantic_sources.append(Source(**source_data)) - - # bulk upsert all sources at once - created_sources = await self.source_manager.bulk_upsert_sources_async(pydantic_sources, actor) - - # map file ids to database ids - # note: sources are matched by name during upsert, so we need to match by name here too - created_sources_by_name = {source.name: source for source in created_sources} - for source_schema in schema.sources: - created_source = created_sources_by_name.get(source_schema.name) - if created_source: - file_to_db_ids[source_schema.id] = created_source.id - imported_count += 1 - else: - logger.warning(f"Source {source_schema.name} was not created during bulk upsert") - - # 4. Create files (depends on sources) - for file_schema in schema.files: - # Convert FileSchema back to FileMetadata - file_data = file_schema.model_dump(exclude={"id", "content"}) - # Remap source_id from file ID to database ID - file_data["source_id"] = file_to_db_ids[file_schema.source_id] - file_metadata = FileMetadata(**file_data) - created_file = await self.file_manager.create_file(file_metadata, actor, text=file_schema.content) - file_to_db_ids[file_schema.id] = created_file.id - imported_count += 1 - - # 5. 
Process files for chunking/embedding (depends on files and sources) - file_processor = FileProcessor( - file_parser=self.file_parser, - embedder=self.embedder, - actor=actor, - using_pinecone=self.using_pinecone, - ) - - for file_schema in schema.files: - if file_schema.content: # Only process files with content - file_db_id = file_to_db_ids[file_schema.id] - source_db_id = file_to_db_ids[file_schema.source_id] - - # Get the created file metadata (with caching) - if file_db_id not in file_metadata_cache: - file_metadata_cache[file_db_id] = await self.file_manager.get_file_by_id(file_db_id, actor) - file_metadata = file_metadata_cache[file_db_id] - - # Save the db call of fetching content again - file_metadata.content = file_schema.content - - # Process the file for chunking/embedding - passages = await file_processor.process_imported_file(file_metadata=file_metadata, source_id=source_db_id) - imported_count += len(passages) - - # 6. Create agents with empty message history - for agent_schema in schema.agents: - # Convert AgentSchema back to CreateAgent, remapping tool/block IDs - agent_data = agent_schema.model_dump(exclude={"id", "in_context_message_ids", "messages"}) - - # Remap tool_ids from file IDs to database IDs - if agent_data.get("tool_ids"): - agent_data["tool_ids"] = [file_to_db_ids[file_id] for file_id in agent_data["tool_ids"]] - - # Remap block_ids from file IDs to database IDs - if agent_data.get("block_ids"): - agent_data["block_ids"] = [file_to_db_ids[file_id] for file_id in agent_data["block_ids"]] - - agent_create = CreateAgent(**agent_data) - created_agent = await self.agent_manager.create_agent_async(agent_create, actor, _init_with_no_messages=True) - file_to_db_ids[agent_schema.id] = created_agent.id - imported_count += 1 - - # 7. 
Create messages and update agent message_ids - for agent_schema in schema.agents: - agent_db_id = file_to_db_ids[agent_schema.id] - message_file_to_db_ids = {} - - # Create messages for this agent - messages = [] - for message_schema in agent_schema.messages: - # Convert MessageSchema back to Message, setting agent_id to new DB ID - message_data = message_schema.model_dump(exclude={"id"}) - message_data["agent_id"] = agent_db_id # Remap agent_id to new database ID - message_obj = Message(**message_data) - messages.append(message_obj) - # Map file ID to the generated database ID immediately - message_file_to_db_ids[message_schema.id] = message_obj.id - - created_messages = await self.message_manager.create_many_messages_async(pydantic_msgs=messages, actor=actor) - imported_count += len(created_messages) - - # Remap in_context_message_ids from file IDs to database IDs - in_context_db_ids = [message_file_to_db_ids[message_schema_id] for message_schema_id in agent_schema.in_context_message_ids] - - # Update agent with the correct message_ids - await self.agent_manager.update_message_ids_async(agent_id=agent_db_id, message_ids=in_context_db_ids, actor=actor) - - # 8. 
Create file-agent relationships (depends on agents and files) - for agent_schema in schema.agents: - if agent_schema.files_agents: - agent_db_id = file_to_db_ids[agent_schema.id] - - # Prepare files for bulk attachment - files_for_agent = [] - visible_content_map = {} - - for file_agent_schema in agent_schema.files_agents: - file_db_id = file_to_db_ids[file_agent_schema.file_id] - - # Use cached file metadata if available - if file_db_id not in file_metadata_cache: - file_metadata_cache[file_db_id] = await self.file_manager.get_file_by_id(file_db_id, actor) - file_metadata = file_metadata_cache[file_db_id] - files_for_agent.append(file_metadata) - - if file_agent_schema.visible_content: - visible_content_map[file_db_id] = file_agent_schema.visible_content - - # Bulk attach files to agent - await self.file_agent_manager.attach_files_bulk( - agent_id=agent_db_id, files_metadata=files_for_agent, visible_content_map=visible_content_map, actor=actor - ) - imported_count += len(files_for_agent) - - return ImportResult( - success=True, - message=f"Import completed successfully. 
Imported {imported_count} entities.", - imported_count=imported_count, - id_mappings=file_to_db_ids, - ) - - except Exception as e: - logger.exception(f"Failed to import agent file: {e}") - raise AgentFileImportError(f"Import failed: {e}") from e - - def _validate_id_format(self, schema: AgentFileSchema) -> List[str]: - """Validate that all IDs follow the expected format""" - errors = [] - - # Define entity types and their expected prefixes - entity_checks = [ - (schema.agents, AgentSchema.__id_prefix__), - (schema.groups, GroupSchema.__id_prefix__), - (schema.blocks, BlockSchema.__id_prefix__), - (schema.files, FileSchema.__id_prefix__), - (schema.sources, SourceSchema.__id_prefix__), - (schema.tools, ToolSchema.__id_prefix__), - ] - - for entities, expected_prefix in entity_checks: - for entity in entities: - if not entity.id.startswith(f"{expected_prefix}-"): - errors.append(f"Invalid ID format: {entity.id} should start with '{expected_prefix}-'") - else: - # Check that the suffix is a valid integer - try: - suffix = entity.id[len(expected_prefix) + 1 :] - int(suffix) - except ValueError: - errors.append(f"Invalid ID format: {entity.id} should have integer suffix") - - # Also check message IDs within agents - for agent in schema.agents: - for message in agent.messages: - if not message.id.startswith(f"{MessageSchema.__id_prefix__}-"): - errors.append(f"Invalid message ID format: {message.id} should start with '{MessageSchema.__id_prefix__}-'") - else: - # Check that the suffix is a valid integer - try: - suffix = message.id[len(MessageSchema.__id_prefix__) + 1 :] - int(suffix) - except ValueError: - errors.append(f"Invalid message ID format: {message.id} should have integer suffix") - - return errors - - def _validate_duplicate_ids(self, schema: AgentFileSchema) -> List[str]: - """Validate that there are no duplicate IDs within or across entity types""" - errors = [] - all_ids = set() - - # Check each entity type for internal duplicates and collect all IDs - 
entity_collections = [ - ("agents", schema.agents), - ("groups", schema.groups), - ("blocks", schema.blocks), - ("files", schema.files), - ("sources", schema.sources), - ("tools", schema.tools), - ] - - for entity_type, entities in entity_collections: - entity_ids = [entity.id for entity in entities] - - # Check for duplicates within this entity type - seen = set() - duplicates = set() - for entity_id in entity_ids: - if entity_id in seen: - duplicates.add(entity_id) - else: - seen.add(entity_id) - - if duplicates: - errors.append(f"Duplicate {entity_type} IDs found: {duplicates}") - - # Check for duplicates across all entity types - for entity_id in entity_ids: - if entity_id in all_ids: - errors.append(f"Duplicate ID across entity types: {entity_id}") - all_ids.add(entity_id) - - # Also check message IDs within agents - for agent in schema.agents: - message_ids = [msg.id for msg in agent.messages] - - # Check for duplicates within agent messages - seen = set() - duplicates = set() - for message_id in message_ids: - if message_id in seen: - duplicates.add(message_id) - else: - seen.add(message_id) - - if duplicates: - errors.append(f"Duplicate message IDs in agent {agent.id}: {duplicates}") - - # Check for duplicates across all entity types - for message_id in message_ids: - if message_id in all_ids: - errors.append(f"Duplicate ID across entity types: {message_id}") - all_ids.add(message_id) - - return errors - - def _validate_file_source_references(self, schema: AgentFileSchema) -> List[str]: - """Validate that all file source_id references exist""" - errors = [] - source_ids = {source.id for source in schema.sources} - - for file in schema.files: - if file.source_id not in source_ids: - errors.append(f"File {file.id} references non-existent source {file.source_id}") - - return errors - - def _validate_file_agent_references(self, schema: AgentFileSchema) -> List[str]: - """Validate that all file-agent relationships reference existing entities""" - errors = [] - 
file_ids = {file.id for file in schema.files} - source_ids = {source.id for source in schema.sources} - {agent.id for agent in schema.agents} - - for agent in schema.agents: - for file_agent in agent.files_agents: - if file_agent.file_id not in file_ids: - errors.append(f"File-agent relationship references non-existent file {file_agent.file_id}") - if file_agent.source_id not in source_ids: - errors.append(f"File-agent relationship references non-existent source {file_agent.source_id}") - if file_agent.agent_id != agent.id: - errors.append(f"File-agent relationship has mismatched agent_id {file_agent.agent_id} vs {agent.id}") - - return errors - - def _validate_schema(self, schema: AgentFileSchema): - """ - Validate the agent file schema for consistency and referential integrity. - - Args: - schema: The schema to validate - - Raises: - AgentFileImportError: If validation fails - """ - errors = [] - - # 1. ID Format Validation - errors.extend(self._validate_id_format(schema)) - - # 2. Duplicate ID Detection - errors.extend(self._validate_duplicate_ids(schema)) - - # 3. File Source Reference Validation - errors.extend(self._validate_file_source_references(schema)) - - # 4. 
"""Manager for handling direct LLM completions using agent configuration."""

from typing import TYPE_CHECKING, Any, Dict, Optional

from letta.errors import LLMError
from letta.llm_api.llm_client import LLMClient
from letta.log import get_logger
from letta.schemas.enums import AgentType, MessageRole
from letta.schemas.letta_message_content import TextContent
from letta.schemas.message import Message
from letta.schemas.usage import LettaUsageStatistics

# Tool name used for structured output via tool forcing
STRUCTURED_OUTPUT_TOOL_NAME = "structured_output"

if TYPE_CHECKING:
    from letta.orm import User
    from letta.schemas.llm_config import LLMConfig
    from letta.server.server import SyncServer

logger = get_logger(__name__)


def _schema_to_tool_definition(schema: Dict[str, Any]) -> Dict[str, Any]:
    """
    Convert a JSON schema into a tool definition for forced tool calling.

    Args:
        schema: JSON schema object with 'properties' and optionally 'required'

    Returns:
        Tool definition dict compatible with OpenAI/Anthropic function calling format
    """
    return {
        "name": STRUCTURED_OUTPUT_TOOL_NAME,
        "description": "Returns a structured response matching the requested schema.",
        "parameters": {
            "type": "object",
            "properties": schema.get("properties", {}),
            # When 'required' is omitted, every declared property is treated as required.
            "required": schema.get("required", list(schema.get("properties", {}).keys())),
        },
    }


class GenerateResponse:
    """Response from direct LLM generation."""

    def __init__(self, content: str, model: str, usage: LettaUsageStatistics):
        # content: assistant text, or the JSON string of tool-call arguments
        # when a response_schema was used (see generate_completion_with_agent_config_async).
        self.content = content
        # model: the resolved model name actually used for the request.
        self.model = model
        # usage: token/usage statistics extracted from the provider response.
        self.usage = usage


class AgentGenerateCompletionManager:
    """Manager for handling direct LLM completions using agent configuration."""

    def __init__(self, server: "SyncServer"):
        """
        Initialize the agent generate completion manager.

        Args:
            server: The SyncServer instance for accessing managers
        """
        self.server = server
        self.agent_manager = server.agent_manager
        self.provider_manager = server.provider_manager

    async def generate_completion_with_agent_config_async(
        self,
        agent_id: str,
        prompt: str,
        actor: "User",
        system_prompt: Optional[str] = None,
        override_model: Optional[str] = None,
        response_schema: Optional[Dict[str, Any]] = None,
    ) -> GenerateResponse:
        """
        Generate a completion directly from the LLM provider using the agent's configuration.

        This method makes a direct request to the LLM provider without any agent processing:
        - No memory or context retrieval
        - No tool calling (unless response_schema is provided)
        - No message persistence
        - No agent state modification

        Args:
            agent_id: The agent ID whose configuration to use
            prompt: The prompt/message to send to the LLM
            actor: The user making the request
            system_prompt: Optional system prompt to prepend to the conversation
            override_model: Optional model handle to override the agent's default
                (e.g., 'openai/gpt-4', 'anthropic/claude-3-5-sonnet')
            response_schema: Optional JSON schema for structured output. When provided,
                the LLM will be forced to return a response matching this
                schema via tool calling.

        Returns:
            GenerateResponse with content, model, and usage statistics.
            When response_schema is provided, content will be the JSON string
            matching the schema.

        Raises:
            NoResultFound: If agent not found
            HandleNotFoundError: If override_model is invalid
            LLMError: If LLM provider error occurs
        """
        # 1. Validate agent exists and user has access
        agent = await self.agent_manager.get_agent_by_id_async(
            agent_id,
            actor,
            include_relationships=[],
        )

        # 2. Get LLM config (with optional override)
        llm_config: "LLMConfig" = agent.llm_config
        if override_model:
            # Get full LLM config for the override model
            # This ensures we get the right provider, endpoint, credentials, etc.
            llm_config = await self.server.get_llm_config_from_handle_async(
                actor=actor,
                handle=override_model,
            )

        logger.info(
            f"Generating completion for agent {agent_id}",
            extra={
                "agent_id": str(agent_id),
                "override_model": override_model,
                "prompt_length": len(prompt),
                "has_system_prompt": system_prompt is not None,
                "has_response_schema": response_schema is not None,
                "model": llm_config.model,
            },
        )

        # 3. Build messages from prompt and optional system_prompt
        letta_messages = []

        # Always add a system message (required by some providers like Anthropic)
        # Use provided system_prompt or minimal default (empty strings not allowed with cache_control)
        letta_messages.append(
            Message(
                role=MessageRole.system,
                content=[TextContent(text=system_prompt if system_prompt else "You are a helpful assistant.")],
            )
        )

        # Add user prompt
        letta_messages.append(
            Message(
                role=MessageRole.user,
                content=[TextContent(text=prompt)],
            )
        )

        # 4. Create LLM client for the provider
        llm_client = LLMClient.create(
            provider_type=llm_config.model_endpoint_type,
            actor=actor,
        )

        if llm_client is None:
            raise LLMError(f"Unsupported provider type: {llm_config.model_endpoint_type}")

        # 5. Build request data
        # If response_schema is provided, create a tool and force the model to call it
        tools = None
        force_tool_call = None
        if response_schema:
            tools = [_schema_to_tool_definition(response_schema)]
            force_tool_call = STRUCTURED_OUTPUT_TOOL_NAME

        # TODO: create a separate agent type
        # NOTE(review): split_thread_agent is reused here as a stand-in agent type for
        # forced tool calling — presumably because its request shape supports tools;
        # confirm this assumption holds for all providers.
        effective_agent_type = AgentType.split_thread_agent if response_schema else agent.agent_type

        request_data = llm_client.build_request_data(
            agent_type=effective_agent_type,
            messages=letta_messages,
            llm_config=llm_config,
            tools=tools,
            force_tool_call=force_tool_call,
        )

        # 6. Make direct LLM request
        response_data = await llm_client.request_async(request_data, llm_config)

        # 7. Convert to standard chat completion format
        chat_completion = await llm_client.convert_response_to_chat_completion(
            response_data,
            letta_messages,
            llm_config,
        )

        # 8. Extract response content
        content = ""
        if chat_completion.choices and len(chat_completion.choices) > 0:
            message = chat_completion.choices[0].message

            if response_schema:
                # When using structured output, extract from tool call arguments
                if message.tool_calls and len(message.tool_calls) > 0:
                    # The tool call arguments contain the structured output as JSON string
                    content = message.tool_calls[0].function.arguments
                else:
                    # Fallback: some providers may return in content even with tool forcing
                    # assumes message.content is a plain string here — TODO confirm
                    content = message.content or ""
                    logger.warning(
                        "Expected tool call for structured output but got content response",
                        extra={"agent_id": str(agent_id), "content_length": len(content)},
                    )
            else:
                content = message.content or ""

        # 9. Extract usage statistics
        usage = llm_client.extract_usage_statistics(response_data, llm_config)

        # 10. Build and return response
        return GenerateResponse(
            content=content,
            model=llm_config.model,
            usage=usage,
        )
AgentEnvironmentVariable, AgentEnvironmentVariable as AgentEnvironmentVariableModel +from letta.orm.sandbox_config import AgentEnvironmentVariable from letta.orm.sqlalchemy_base import AccessType from letta.otel.tracing import trace_method from letta.prompts.prompt_generator import PromptGenerator from letta.schemas.agent import ( - AgentRelationships, AgentState as PydanticAgentState, CreateAgent, InternalTemplateAgentCreate, @@ -60,7 +58,7 @@ from letta.schemas.agent import ( ) from letta.schemas.block import DEFAULT_BLOCKS, Block as PydanticBlock, BlockUpdate from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import AgentType, PrimitiveType, ProviderType, TagMatchMode, ToolType, VectorDBProvider +from letta.schemas.enums import AgentType, PrimitiveType, TagMatchMode, ToolType, VectorDBProvider from letta.schemas.environment_variables import AgentEnvironmentVariable as PydanticAgentEnvVar from letta.schemas.file import FileMetadata as PydanticFileMetadata from letta.schemas.group import Group as PydanticGroup, ManagerType @@ -74,10 +72,6 @@ from letta.schemas.source import Source as PydanticSource from letta.schemas.tool import Tool as PydanticTool from letta.schemas.tool_rule import ContinueToolRule, RequiresApprovalToolRule, TerminalToolRule from letta.schemas.user import User as PydanticUser -from letta.serialize_schemas import MarshmallowAgentSchema -from letta.serialize_schemas.marshmallow_message import SerializedMessageSchema -from letta.serialize_schemas.marshmallow_tool import SerializedToolSchema -from letta.serialize_schemas.pydantic_agent_schema import AgentSchema from letta.server.db import db_registry from letta.services.archive_manager import ArchiveManager from letta.services.block_manager import BlockManager, validate_block_limit_constraint @@ -89,11 +83,9 @@ from letta.services.files_agents_manager import FileAgentManager from letta.services.helpers.agent_manager_helper import ( _apply_filters, 
_apply_identity_filters, - _apply_pagination, _apply_pagination_async, _apply_relationship_filters, _apply_tag_filter, - _process_relationship, _process_relationship_async, build_agent_passage_query, build_passage_query, @@ -113,7 +105,7 @@ from letta.services.message_manager import MessageManager from letta.services.passage_manager import PassageManager from letta.services.source_manager import SourceManager from letta.services.tool_manager import ToolManager -from letta.settings import DatabaseChoice, model_settings, settings +from letta.settings import DatabaseChoice, settings from letta.utils import ( bounded_gather, calculate_file_defaults_based_on_context_window, @@ -129,8 +121,8 @@ logger = get_logger(__name__) class AgentManager: """Manager class to handle business logic related to Agents.""" - def __init__(self): - self.block_manager = BlockManager() + def __init__(self, block_manager: Optional[BlockManager] = None): + self.block_manager = block_manager or BlockManager() self.tool_manager = ToolManager() self.source_manager = SourceManager() self.message_manager = MessageManager() @@ -351,9 +343,11 @@ class AgentManager: # For v1 agents, enforce sane defaults even when reasoning is omitted if agent_create.agent_type == AgentType.letta_v1_agent: - # Claude 3.7/4 or OpenAI o1/o3/o4/gpt-5 - default_reasoning = LLMConfig.is_anthropic_reasoning_model(agent_create.llm_config) or LLMConfig.is_openai_reasoning_model( - agent_create.llm_config + # Claude 3.7/4 or OpenAI o1/o3/o4/gpt-5 or ZAI GLM-4.5+ + default_reasoning = ( + LLMConfig.is_anthropic_reasoning_model(agent_create.llm_config) + or LLMConfig.is_openai_reasoning_model(agent_create.llm_config) + or LLMConfig.is_zai_reasoning_model(agent_create.llm_config) ) agent_create.llm_config = LLMConfig.apply_reasoning_setting_to_config( agent_create.llm_config, @@ -495,6 +489,34 @@ class AgentManager: if tool_rules: check_supports_structured_output(model=agent_create.llm_config.model, tool_rules=tool_rules) + # 
Update agent's compaction settings with defaults if needed + from letta.schemas.enums import ProviderType + from letta.services.summarizer.summarizer_config import CompactionSettings, get_default_summarizer_model + + effective_compaction_settings = agent_create.compaction_settings + # Use provider_name if set, otherwise fall back to model_endpoint_type + provider_name = agent_create.llm_config.provider_name or agent_create.llm_config.model_endpoint_type + + # Convert to ProviderType enum to get default summarizer model + try: + default_model = get_default_summarizer_model(provider_type=ProviderType(provider_name)) + except (ValueError, TypeError): # unknown provider + default_model = None + + # Use agent's model as fallback + if not default_model: + default_model = agent_create.llm_config.model + + if effective_compaction_settings is None: + # If no settings provided, INITIALIZE with default model + effective_compaction_settings = CompactionSettings(model=default_model) + elif effective_compaction_settings is not None and effective_compaction_settings.model is None: + # If settings provided but no model, UPDATE with default model + effective_compaction_settings = effective_compaction_settings.model_copy(update={"model": default_model}) + + # Will set mode-specific default prompt if no prompt is provided + effective_compaction_settings = effective_compaction_settings.set_mode_specific_prompt() + new_agent = AgentModel( name=agent_create.name, system=derive_system_message( @@ -505,7 +527,7 @@ class AgentManager: agent_type=agent_create.agent_type, llm_config=agent_create.llm_config, embedding_config=agent_create.embedding_config, - compaction_settings=agent_create.compaction_settings, + compaction_settings=effective_compaction_settings, organization_id=actor.organization_id, description=agent_create.description, metadata_=agent_create.metadata, @@ -606,24 +628,30 @@ class AgentManager: result.tool_exec_environment_variables = env_vars result.secrets = env_vars - # 
initial message sequence (skip if _init_with_no_messages is True) + # initial message sequence (skip non-system messages if _init_with_no_messages is True) if not _init_with_no_messages: init_messages = await self._generate_initial_message_sequence_async( actor, agent_state=result, supplied_initial_message_sequence=agent_create.initial_message_sequence, ) - result.message_ids = [msg.id for msg in init_messages] - new_agent.message_ids = [msg.id for msg in init_messages] - await new_agent.update_async(session, no_refresh=True) else: - init_messages = [] + all_messages = await initialize_message_sequence_async( + agent_state=result, memory_edit_timestamp=get_utc_time(), include_initial_boot_message=True + ) + init_messages = [ + PydanticMessage.dict_to_message( + agent_id=result.id, model=result.llm_config.model, openai_message_dict=all_messages[0] + ) + ] - # Only create messages if we initialized with messages - if not _init_with_no_messages: - await self.message_manager.create_many_messages_async( - pydantic_msgs=init_messages, actor=actor, project_id=result.project_id, template_id=result.template_id - ) + result.message_ids = [msg.id for msg in init_messages] + new_agent.message_ids = [msg.id for msg in init_messages] + await new_agent.update_async(session, no_refresh=True) + + await self.message_manager.create_many_messages_async( + pydantic_msgs=init_messages, actor=actor, project_id=result.project_id, template_id=result.template_id + ) # Attach files from sources if this is a template-based creation # Use the new agent's sources (already copied from template via source_ids) @@ -1328,6 +1356,11 @@ class AgentManager: @trace_method def get_system_message(self, agent_id: str, actor: PydanticUser) -> PydanticMessage: message_ids = self.get_agent_by_id(agent_id=agent_id, actor=actor).message_ids + if not message_ids: + raise LettaError( + message=f"Agent {agent_id} has no in-context messages. 
" + "This typically means the agent's system message was not initialized correctly.", + ) return self.message_manager.get_message_by_id(message_id=message_ids[0], actor=actor) @enforce_types @@ -1335,6 +1368,11 @@ class AgentManager: @trace_method async def get_system_message_async(self, agent_id: str, actor: PydanticUser) -> PydanticMessage: agent = await self.get_agent_by_id_async(agent_id=agent_id, include_relationships=[], actor=actor) + if not agent.message_ids: + raise LettaError( + message=f"Agent {agent_id} has no in-context messages. " + "This typically means the agent's system message was not initialized correctly.", + ) return await self.message_manager.get_message_by_id_async(message_id=agent.message_ids[0], actor=actor) # TODO: This is duplicated below @@ -1522,7 +1560,7 @@ class AgentManager: @trace_method def trim_older_in_context_messages(self, num: int, agent_id: str, actor: PydanticUser) -> PydanticAgentState: message_ids = self.get_agent_by_id(agent_id=agent_id, actor=actor).message_ids - new_messages = [message_ids[0]] + message_ids[num:] # 0 is system message + new_messages = [message_ids[0], *message_ids[num:]] return self.set_in_context_messages(agent_id=agent_id, message_ids=new_messages, actor=actor) @enforce_types @@ -1565,21 +1603,30 @@ class AgentManager: @enforce_types @trace_method async def reset_messages_async( - self, agent_id: str, actor: PydanticUser, add_default_initial_messages: bool = False, needs_agent_state: bool = True + self, + agent_id: str, + actor: PydanticUser, + add_default_initial_messages: bool = False, + needs_agent_state: bool = True, + rebuild_system_prompt: bool = False, ) -> Optional[PydanticAgentState]: """ Clears all in-context messages for the specified agent except the original system message by: 1) Preserving the first message ID (original system message). 2) Updating the agent's message_ids to only contain the system message. - 3) Optionally adding default initial messages after the system message. 
+ 3) Optionally rebuilding the system prompt with current memory blocks (for prefix caching optimization). + 4) Optionally adding default initial messages after the system message. Note: This only clears messages from the agent's context, it does not delete them from the database. Args: - add_default_initial_messages: If true, adds the default initial messages after resetting. agent_id (str): The ID of the agent whose messages will be reset. actor (PydanticUser): The user performing this action. + add_default_initial_messages: If true, adds the default initial messages after resetting. needs_agent_state: If True, returns the updated agent state. If False, returns None (for performance optimization) + rebuild_system_prompt: If True, rebuilds the system prompt with current memory blocks. + This ensures the system prompt reflects the latest memory state after reset. + Defaults to False to preserve the original system message content. Returns: Optional[PydanticAgentState]: The updated agent state with only the original system message preserved, or None if needs_agent_state=False. 
@@ -1599,12 +1646,17 @@ class AgentManager: agent.message_ids = [system_message_id] await agent.update_async(db_session=session, actor=actor) - # Only convert to pydantic if we need to return it or add initial messages - if add_default_initial_messages or needs_agent_state: - agent_state = await agent.to_pydantic_async(include_relationships=["sources"] if add_default_initial_messages else None) + # Only convert to pydantic if we need to return it or add initial messages or rebuild system prompt + if add_default_initial_messages or needs_agent_state or rebuild_system_prompt: + include_rels = ["sources", "memory"] if (add_default_initial_messages or rebuild_system_prompt) else None + agent_state = await agent.to_pydantic_async(include_relationships=include_rels) else: agent_state = None + # Optionally rebuild the system prompt with current memory blocks + if rebuild_system_prompt and agent_state: + agent_state, _, _, _ = await self.rebuild_system_prompt_async(agent_id=agent_state.id, actor=actor, force=True) + # Optionally add default initial messages after the system message if add_default_initial_messages: init_messages = await initialize_message_sequence_async( @@ -1672,6 +1724,7 @@ class AgentManager: blocks=blocks, file_blocks=agent_state.memory.file_blocks, agent_type=agent_state.agent_type, + git_enabled=agent_state.memory.git_enabled, ) # NOTE: don't do this since re-buildin the memory is handled at the start of the step @@ -1983,6 +2036,9 @@ class AgentManager: actor: PydanticUser, ) -> PydanticBlock: """Modifies a block attached to an agent by its label.""" + + block_id_for_custom_manager: str | None = None + async with db_registry.async_session() as session: matched_block = None agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor) @@ -1995,33 +2051,46 @@ class AgentManager: update_data = block_update.model_dump(to_orm=True, exclude_unset=True, exclude_none=True) - # Extract tags from update data (it's not a column on 
the block table) - new_tags = update_data.pop("tags", None) - # Validate limit constraints before updating validate_block_limit_constraint(update_data, matched_block) - for key, value in update_data.items(): - setattr(matched_block, key, value) - - await matched_block.update_async(session, actor=actor) - - if new_tags is not None: - await BlockManager._replace_block_pivot_rows_async( - session, - BlocksTags.__table__, - matched_block.id, - [{"block_id": matched_block.id, "tag": tag} for tag in new_tags], - ) - - pydantic_block = matched_block.to_pydantic() - if new_tags is not None: - pydantic_block.tags = new_tags + # If a custom block manager is injected (e.g. GitEnabledBlockManager), route + # through it so git-backed memory semantics apply. + if self.block_manager.__class__ is not BlockManager: + block_id_for_custom_manager = matched_block.id else: - tags_result = await session.execute(select(BlocksTags.tag).where(BlocksTags.block_id == matched_block.id)) - pydantic_block.tags = [row[0] for row in tags_result.fetchall()] + # Extract tags from update data (it's not a column on the block table) + new_tags = update_data.pop("tags", None) - return pydantic_block + for key, value in update_data.items(): + setattr(matched_block, key, value) + + await matched_block.update_async(session, actor=actor) + + if new_tags is not None: + await BlockManager._replace_block_pivot_rows_async( + session, + BlocksTags.__table__, + matched_block.id, + [{"block_id": matched_block.id, "tag": tag} for tag in new_tags], + ) + + pydantic_block = matched_block.to_pydantic() + if new_tags is not None: + pydantic_block.tags = new_tags + else: + tags_result = await session.execute(select(BlocksTags.tag).where(BlocksTags.block_id == matched_block.id)) + pydantic_block.tags = [row[0] for row in tags_result.fetchall()] + + return pydantic_block + + # Route through block_manager which handles git integration if enabled + assert block_id_for_custom_manager is not None + return await 
self.block_manager.update_block_async( + block_id=block_id_for_custom_manager, + block_update=block_update, + actor=actor, + ) @enforce_types @raise_on_invalid_id(param_name="agent_id", expected_prefix=PrimitiveType.AGENT) @@ -2033,9 +2102,9 @@ class AgentManager: agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor) block = await BlockModel.read_async(db_session=session, identifier=block_id, actor=actor) - # Attach block to the main agent - agent.core_memory.append(block) - # await agent.update_async(session, actor=actor, no_commit=True) + # Attach block to the main agent (skip if already attached) + if not any(b.id == block_id for b in agent.core_memory): + agent.core_memory.append(block) await agent.update_async(session) # If agent is part of a sleeptime group, attach block to the sleeptime_agent @@ -2047,9 +2116,7 @@ class AgentManager: try: other_agent = await AgentModel.read_async(db_session=session, identifier=other_agent_id, actor=actor) if other_agent.agent_type == AgentType.sleeptime_agent: - # Check if block with same label already exists - existing_block = next((b for b in other_agent.core_memory if b.label == block.label), None) - if not existing_block: + if not any(b.id == block_id for b in other_agent.core_memory): other_agent.core_memory.append(block) await other_agent.update_async(session, actor=actor) except NoResultFound: @@ -2185,7 +2252,6 @@ class AgentManager: Lists all passages attached to an agent (combines both source and agent passages). """ - import warnings logger.warning( "list_passages_async is deprecated. 
Use query_source_passages_async or query_agent_passages_async instead.", diff --git a/letta/services/agent_serialization_manager.py b/letta/services/agent_serialization_manager.py index 1947a8ee..30be1ecb 100644 --- a/letta/services/agent_serialization_manager.py +++ b/letta/services/agent_serialization_manager.py @@ -1,4 +1,3 @@ -import asyncio import uuid from datetime import datetime, timezone from typing import Any, Dict, List, Optional @@ -25,12 +24,13 @@ from letta.schemas.agent_file import ( ImportResult, MCPServerSchema, MessageSchema, + SkillSchema, SourceSchema, ToolSchema, ) from letta.schemas.block import Block from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import FileProcessingStatus, VectorDBProvider +from letta.schemas.enums import FileProcessingStatus from letta.schemas.file import FileMetadata from letta.schemas.group import Group, GroupCreate from letta.schemas.llm_config import LLMConfig @@ -161,7 +161,7 @@ class AgentSerializationManager: return sorted(unique_blocks.values(), key=lambda x: x.label) async def _extract_unique_sources_and_files_from_agents( - self, agent_states: List[AgentState], actor: User, files_agents_cache: dict = None + self, agent_states: List[AgentState], actor: User, files_agents_cache: dict | None = None ) -> tuple[List[Source], List[FileMetadata]]: """Extract unique sources and files from agent states using bulk operations""" @@ -188,7 +188,13 @@ class AgentSerializationManager: return sources, files - async def _convert_agent_state_to_schema(self, agent_state: AgentState, actor: User, files_agents_cache: dict = None) -> AgentSchema: + async def _convert_agent_state_to_schema( + self, + agent_state: AgentState, + actor: User, + files_agents_cache: dict | None = None, + scrub_messages: bool = False, + ) -> AgentSchema: """Convert AgentState to AgentSchema with ID remapping""" agent_file_id = self._map_db_to_file_id(agent_state.id, AgentSchema.__id_prefix__) @@ -209,21 +215,27 @@ class 
AgentSerializationManager: ) agent_schema.id = agent_file_id - # Ensure all in-context messages are present before ID remapping. - # AgentSchema.from_agent_state fetches a limited slice (~50) and may exclude messages still - # referenced by in_context_message_ids. Fetch any missing in-context messages by ID so remapping succeeds. - existing_msg_ids = {m.id for m in (agent_schema.messages or [])} - in_context_ids = agent_schema.in_context_message_ids or [] - missing_in_context_ids = [mid for mid in in_context_ids if mid not in existing_msg_ids] - if missing_in_context_ids: - missing_msgs = await self.message_manager.get_messages_by_ids_async(message_ids=missing_in_context_ids, actor=actor) - fetched_ids = {m.id for m in missing_msgs} - not_found = [mid for mid in missing_in_context_ids if mid not in fetched_ids] - if not_found: - # Surface a clear mapping error; handled upstream by the route/export wrapper. - raise AgentExportIdMappingError(db_id=not_found[0], entity_type=MessageSchema.__id_prefix__) - for msg in missing_msgs: - agent_schema.messages.append(MessageSchema.from_message(msg)) + # Handle message scrubbing + if not scrub_messages: + # Ensure all in-context messages are present before ID remapping. + # AgentSchema.from_agent_state fetches a limited slice (~50) and may exclude messages still + # referenced by in_context_message_ids. Fetch any missing in-context messages by ID so remapping succeeds. 
+ existing_msg_ids = {m.id for m in (agent_schema.messages or [])} + in_context_ids = agent_schema.in_context_message_ids or [] + missing_in_context_ids = [mid for mid in in_context_ids if mid not in existing_msg_ids] + if missing_in_context_ids: + missing_msgs = await self.message_manager.get_messages_by_ids_async(message_ids=missing_in_context_ids, actor=actor) + fetched_ids = {m.id for m in missing_msgs} + not_found = [mid for mid in missing_in_context_ids if mid not in fetched_ids] + if not_found: + # Surface a clear mapping error; handled upstream by the route/export wrapper. + raise AgentExportIdMappingError(db_id=not_found[0], entity_type=MessageSchema.__id_prefix__) + for msg in missing_msgs: + agent_schema.messages.append(MessageSchema.from_message(msg)) + else: + # Scrub all messages from export + agent_schema.messages = [] + agent_schema.in_context_message_ids = [] # wipe the values of tool_exec_environment_variables (they contain secrets) agent_secrets = agent_schema.secrets or agent_schema.tool_exec_environment_variables @@ -231,17 +243,18 @@ class AgentSerializationManager: agent_schema.tool_exec_environment_variables = {key: "" for key in agent_secrets} agent_schema.secrets = {key: "" for key in agent_secrets} - if agent_schema.messages: - for message in agent_schema.messages: - message_file_id = self._map_db_to_file_id(message.id, MessageSchema.__id_prefix__) - message.id = message_file_id - message.agent_id = agent_file_id + if not scrub_messages: + if agent_schema.messages: + for message in agent_schema.messages: + message_file_id = self._map_db_to_file_id(message.id, MessageSchema.__id_prefix__) + message.id = message_file_id + message.agent_id = agent_file_id - if agent_schema.in_context_message_ids: - agent_schema.in_context_message_ids = [ - self._map_db_to_file_id(message_id, MessageSchema.__id_prefix__, allow_new=False) - for message_id in agent_schema.in_context_message_ids - ] + if agent_schema.in_context_message_ids: + 
agent_schema.in_context_message_ids = [ + self._map_db_to_file_id(message_id, MessageSchema.__id_prefix__, allow_new=False) + for message_id in agent_schema.in_context_message_ids + ] if agent_schema.tool_ids: agent_schema.tool_ids = [self._map_db_to_file_id(tool_id, ToolSchema.__id_prefix__) for tool_id in agent_schema.tool_ids] @@ -359,7 +372,14 @@ class AgentSerializationManager: logger.error(f"Failed to convert group {group.id}: {e}") raise - async def export(self, agent_ids: List[str], actor: User, conversation_id: Optional[str] = None) -> AgentFileSchema: + async def export( + self, + agent_ids: List[str], + actor: User, + conversation_id: Optional[str] = None, + skills: Optional[List[SkillSchema]] = None, + scrub_messages: bool = False, + ) -> AgentFileSchema: """ Export agents and their related entities to AgentFileSchema format. @@ -367,6 +387,10 @@ class AgentSerializationManager: agent_ids: List of agent UUIDs to export conversation_id: Optional conversation ID. If provided, uses the conversation's in-context message_ids instead of the agent's global message_ids. + skills: Optional list of skills to include in the export. Skills are resolved + client-side and passed as SkillSchema objects. + scrub_messages: If True, excludes all messages from the export. Useful for + sharing agent configs without conversation history. 
Returns: AgentFileSchema with all related entities @@ -434,7 +458,12 @@ class AgentSerializationManager: # Convert to schemas with ID remapping (reusing cached file-agent data) agent_schemas = [ - await self._convert_agent_state_to_schema(agent_state, actor=actor, files_agents_cache=files_agents_cache) + await self._convert_agent_state_to_schema( + agent_state, + actor=actor, + files_agents_cache=files_agents_cache, + scrub_messages=scrub_messages, + ) for agent_state in agent_states ] tool_schemas = [self._convert_tool_to_schema(tool) for tool in tool_set] @@ -455,6 +484,7 @@ class AgentSerializationManager: sources=source_schemas, tools=tool_schemas, mcp_servers=mcp_server_schemas, + skills=skills or [], metadata={"revision_id": await get_latest_alembic_revision()}, created_at=datetime.now(timezone.utc), ) @@ -725,6 +755,10 @@ class AgentSerializationManager: agent_db_id = file_to_db_ids[agent_schema.id] message_file_to_db_ids = {} + # Save placeholder message IDs so we can clean them up after successful import + agent_state = await self.agent_manager.get_agent_by_id_async(agent_db_id, actor) + placeholder_message_ids = list(agent_state.message_ids) if agent_state.message_ids else [] + # Create messages for this agent messages = [] for message_schema in agent_schema.messages: @@ -750,6 +784,10 @@ class AgentSerializationManager: # Update agent with the correct message_ids await self.agent_manager.update_message_ids_async(agent_id=agent_db_id, message_ids=in_context_db_ids, actor=actor) + # Clean up placeholder messages now that import succeeded + for placeholder_id in placeholder_message_ids: + await self.message_manager.delete_message_by_id_async(message_id=placeholder_id, actor=actor) + # 8. 
Create file-agent relationships (depends on agents and files) for agent_schema in schema.agents: if agent_schema.files_agents: diff --git a/letta/services/archive_manager.py b/letta/services/archive_manager.py index 28c3322a..f1a26159 100644 --- a/letta/services/archive_manager.py +++ b/letta/services/archive_manager.py @@ -1,4 +1,3 @@ -import asyncio from datetime import datetime from typing import Dict, List, Optional diff --git a/letta/services/block_manager.py b/letta/services/block_manager.py index 848c4868..ca525efa 100644 --- a/letta/services/block_manager.py +++ b/letta/services/block_manager.py @@ -1,4 +1,3 @@ -import asyncio from datetime import datetime from typing import Dict, List, Optional @@ -551,19 +550,39 @@ class BlockManager: result = await session.execute(query) blocks = result.scalars().all() - # Convert to Pydantic models + # Convert to Pydantic models and preserve caller-provided ID order pydantic_blocks = [block.to_pydantic() for block in blocks] + blocks_by_id = {b.id: b for b in pydantic_blocks} + ordered_blocks = [blocks_by_id.get(block_id) for block_id in block_ids] - # For backward compatibility, add None for missing blocks + # For backward compatibility, include None for missing blocks if len(pydantic_blocks) < len(block_ids): - {block.id for block in pydantic_blocks} - result_blocks = [] - for block_id in block_ids: - block = next((b for b in pydantic_blocks if b.id == block_id), None) - result_blocks.append(block) - return result_blocks + return ordered_blocks - return pydantic_blocks + return ordered_blocks + + @enforce_types + @trace_method + async def get_blocks_by_agent_async(self, agent_id: str, actor: PydanticUser) -> List[PydanticBlock]: + """Retrieve all blocks attached to a specific agent.""" + async with db_registry.async_session() as session: + query = ( + select(BlockModel) + .join(BlocksAgents, BlockModel.id == BlocksAgents.block_id) + .where( + BlocksAgents.agent_id == agent_id, + BlockModel.organization_id == 
actor.organization_id, + ) + .options( + noload(BlockModel.agents), + noload(BlockModel.identities), + noload(BlockModel.groups), + noload(BlockModel.tags), + ) + ) + result = await session.execute(query) + blocks = result.scalars().all() + return [block.to_pydantic() for block in blocks] @enforce_types @raise_on_invalid_id(param_name="block_id", expected_prefix=PrimitiveType.BLOCK) diff --git a/letta/services/block_manager_git.py b/letta/services/block_manager_git.py new file mode 100644 index 00000000..d7a7049a --- /dev/null +++ b/letta/services/block_manager_git.py @@ -0,0 +1,596 @@ +"""Git-enabled block manager that uses object storage as source of truth. + +When an agent has the GIT_MEMORY_ENABLED_TAG tag, block operations: +1. Write to git (GCS) first - source of truth +2. Update PostgreSQL as cache + +This provides full version history while maintaining fast reads from PostgreSQL. +""" + +import time +from typing import List, Optional + +from letta.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT +from letta.log import get_logger +from letta.orm.block import Block as BlockModel +from letta.otel.tracing import trace_method +from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock +from letta.schemas.user import User as PydanticUser +from letta.server.db import db_registry +from letta.services.block_manager import BlockManager +from letta.services.memory_repo import MemfsClient +from letta.utils import enforce_types + +logger = get_logger(__name__) + +# Tag that enables git-based memory for an agent +GIT_MEMORY_ENABLED_TAG = "git-memory-enabled" + + +class GitEnabledBlockManager(BlockManager): + """Block manager that uses git as source of truth when enabled for an agent. 
+ + For agents with the GIT_MEMORY_ENABLED_TAG: + - All writes go to git first, then sync to PostgreSQL + - Reads come from PostgreSQL (cache) for performance + - Full version history is maintained in git + + For agents without the tag: + - Behaves exactly like the standard BlockManager + """ + + def __init__(self, memory_repo_manager: Optional[MemfsClient] = None): + """Initialize the git-enabled block manager. + + Args: + memory_repo_manager: The memory repo manager for git operations. + If None, git features are disabled. + """ + super().__init__() + self.memory_repo_manager = memory_repo_manager + + async def _is_git_enabled_for_agent(self, agent_id: str, actor: PydanticUser) -> bool: + """Check if an agent has git-based memory enabled.""" + if self.memory_repo_manager is None: + return False + + # Check if agent has the git-memory-enabled tag + async with db_registry.async_session() as session: + from sqlalchemy import select + + from letta.orm.agents_tags import AgentsTags + + result = await session.execute( + select(AgentsTags).where( + AgentsTags.agent_id == agent_id, + AgentsTags.tag == GIT_MEMORY_ENABLED_TAG, + ) + ) + return result.scalar_one_or_none() is not None + + async def _get_agent_id_for_block(self, block_id: str, actor: PydanticUser) -> Optional[str]: + """Get the agent ID that owns a block.""" + async with db_registry.async_session() as session: + from sqlalchemy import select + + from letta.orm.blocks_agents import BlocksAgents + + result = await session.execute(select(BlocksAgents.agent_id).where(BlocksAgents.block_id == block_id)) + row = result.first() + return row[0] if row else None + + async def _sync_block_to_postgres( + self, + agent_id: str, + label: str, + value: str, + actor: PydanticUser, + description: Optional[str] = None, + limit: Optional[int] = None, + read_only: Optional[bool] = None, + metadata: Optional[dict] = None, + ) -> PydanticBlock: + """Sync a block from git to PostgreSQL cache.""" + async with 
db_registry.async_session() as session: + from sqlalchemy import select + + from letta.orm.blocks_agents import BlocksAgents + + # Find existing block for this agent+label + result = await session.execute( + select(BlockModel) + .join(BlocksAgents, BlocksAgents.block_id == BlockModel.id) + .where( + BlocksAgents.agent_id == agent_id, + BlockModel.label == label, + BlockModel.organization_id == actor.organization_id, + ) + ) + block = result.scalar_one_or_none() + + if block: + # Update existing block + block.value = value + if description is not None: + block.description = description + if limit is not None: + block.limit = limit + if read_only is not None: + block.read_only = read_only + if metadata is not None: + block.metadata_ = metadata + await block.update_async(db_session=session, actor=actor) + else: + # Create new block and link to agent in a single transaction + from letta.schemas.block import BaseBlock + + block = BlockModel( + id=BaseBlock.generate_id(), + label=label, + value=value, + description=description or f"{label} block", + limit=limit or CORE_MEMORY_BLOCK_CHAR_LIMIT, + read_only=read_only or False, + metadata_=metadata or {}, + organization_id=actor.organization_id, + ) + await block.create_async(db_session=session, actor=actor, no_commit=True) + + # Link to agent + from letta.orm.blocks_agents import BlocksAgents + + blocks_agents = BlocksAgents( + agent_id=agent_id, + block_id=block.id, + block_label=label, + ) + session.add(blocks_agents) + await session.commit() + + return block.to_pydantic() + + async def _delete_block_from_postgres( + self, + agent_id: str, + label: str, + actor: PydanticUser, + ) -> None: + """Delete a block from PostgreSQL cache.""" + async with db_registry.async_session() as session: + from sqlalchemy import delete, select + + from letta.orm.blocks_agents import BlocksAgents + + # Find block for this agent+label + result = await session.execute( + select(BlockModel) + .join(BlocksAgents, BlocksAgents.block_id == 
BlockModel.id) + .where( + BlocksAgents.agent_id == agent_id, + BlockModel.label == label, + BlockModel.organization_id == actor.organization_id, + ) + ) + block = result.scalar_one_or_none() + + if block: + # Delete from blocks_agents + await session.execute(delete(BlocksAgents).where(BlocksAgents.block_id == block.id)) + # Delete the block + await block.hard_delete_async(db_session=session, actor=actor) + + # ========================================================================= + # Override BlockManager methods to add git integration + # ========================================================================= + + @enforce_types + @trace_method + async def update_block_async( + self, + block_id: str, + block_update: BlockUpdate, + actor: PydanticUser, + ) -> PydanticBlock: + """Update a block. If git-enabled, commits to git first.""" + t_start = time.perf_counter() + logger.info(f"[GIT_PERF] update_block_async START block_id={block_id}") + + # Get agent ID for this block + t0 = time.perf_counter() + agent_id = await self._get_agent_id_for_block(block_id, actor) + logger.info(f"[GIT_PERF] _get_agent_id_for_block took {(time.perf_counter() - t0) * 1000:.2f}ms agent_id={agent_id}") + + # Check if git is enabled for this agent + t0 = time.perf_counter() + git_enabled = agent_id and await self._is_git_enabled_for_agent(agent_id, actor) + logger.info(f"[GIT_PERF] _is_git_enabled_for_agent took {(time.perf_counter() - t0) * 1000:.2f}ms enabled={git_enabled}") + + if git_enabled: + # Get current block to get label + t0 = time.perf_counter() + async with db_registry.async_session() as session: + block = await BlockModel.read_async(db_session=session, identifier=block_id, actor=actor) + label = block.label + logger.info(f"[GIT_PERF] BlockModel.read_async took {(time.perf_counter() - t0) * 1000:.2f}ms label={label}") + + # 1. 
Commit to git (source of truth) + # Resolve each field: use the update value if provided, else fall back + # to the current block value from Postgres. + resolved_value = block_update.value if block_update.value is not None else block.value + resolved_description = block_update.description if block_update.description is not None else block.description + resolved_limit = block_update.limit if block_update.limit is not None else block.limit + resolved_read_only = block_update.read_only if block_update.read_only is not None else block.read_only + resolved_metadata = block_update.metadata if block_update.metadata is not None else (block.metadata_ or {}) + + t0 = time.perf_counter() + commit = await self.memory_repo_manager.update_block_async( + agent_id=agent_id, + label=label, + value=resolved_value, + actor=actor, + message=f"Update {label} block", + description=resolved_description, + limit=resolved_limit, + read_only=resolved_read_only, + metadata=resolved_metadata, + ) + git_time = (time.perf_counter() - t0) * 1000 + logger.info(f"[GIT_PERF] memory_repo_manager.update_block_async took {git_time:.2f}ms commit={commit.sha[:8]}") + + # 2. Sync to PostgreSQL cache + t0 = time.perf_counter() + result = await self._sync_block_to_postgres( + agent_id=agent_id, + label=label, + value=block_update.value or block.value, + actor=actor, + description=block_update.description, + limit=block_update.limit, + ) + logger.info(f"[GIT_PERF] _sync_block_to_postgres took {(time.perf_counter() - t0) * 1000:.2f}ms") + + # Block tags are not stored in git (today); they remain Postgres-only metadata. + # Preserve legacy behavior by updating tags in Postgres even for git-enabled agents. 
+ if block_update.tags is not None: + async with db_registry.async_session() as session: + from letta.orm.blocks_tags import BlocksTags + + await BlockManager._replace_block_pivot_rows_async( + session, + BlocksTags.__table__, + block_id, + [{"block_id": block_id, "tag": tag, "organization_id": actor.organization_id} for tag in block_update.tags], + ) + result.tags = block_update.tags + else: + async with db_registry.async_session() as session: + from sqlalchemy import select + + from letta.orm.blocks_tags import BlocksTags + + tags_result = await session.execute(select(BlocksTags.tag).where(BlocksTags.block_id == block_id)) + result.tags = [row[0] for row in tags_result.fetchall()] + + total_time = (time.perf_counter() - t_start) * 1000 + logger.info(f"[GIT_PERF] update_block_async TOTAL {total_time:.2f}ms (git-enabled path)") + return result + else: + # Fall back to standard PostgreSQL-only behavior + t0 = time.perf_counter() + result = await super().update_block_async(block_id, block_update, actor) + logger.info(f"[GIT_PERF] super().update_block_async took {(time.perf_counter() - t0) * 1000:.2f}ms") + + total_time = (time.perf_counter() - t_start) * 1000 + logger.info(f"[GIT_PERF] update_block_async TOTAL {total_time:.2f}ms (postgres-only path)") + return result + + @enforce_types + @trace_method + async def create_block_async( + self, + block: CreateBlock, + actor: PydanticUser, + agent_id: Optional[str] = None, + ) -> PydanticBlock: + """Create a block. If git-enabled and agent_id provided, commits to git first.""" + # Check if git is enabled for this agent + if agent_id and await self._is_git_enabled_for_agent(agent_id, actor): + # 1. 
Commit to git (source of truth) + commit = await self.memory_repo_manager.create_block_async( + agent_id=agent_id, + block=PydanticBlock( + label=block.label, + value=block.value, + description=block.description, + limit=block.limit or CORE_MEMORY_BLOCK_CHAR_LIMIT, + ), + actor=actor, + message=f"Create {block.label} block", + ) + logger.info(f"Git commit for block create: {commit.sha[:8]}") + + # 2. Sync to PostgreSQL cache + return await self._sync_block_to_postgres( + agent_id=agent_id, + label=block.label, + value=block.value, + actor=actor, + description=block.description, + limit=block.limit, + ) + else: + # Fall back to standard PostgreSQL-only behavior + return await super().create_block_async(block, actor) + + @enforce_types + @trace_method + async def delete_block_async(self, block_id: str, actor: PydanticUser) -> None: + """Delete a block. If git-enabled, commits deletion to git first.""" + # Get agent ID and label for this block + agent_id = await self._get_agent_id_for_block(block_id, actor) + + if agent_id and await self._is_git_enabled_for_agent(agent_id, actor): + # Get block label before deleting + async with db_registry.async_session() as session: + block = await BlockModel.read_async(db_session=session, identifier=block_id, actor=actor) + label = block.label + + # 1. Commit deletion to git (source of truth) + commit = await self.memory_repo_manager.delete_block_async( + agent_id=agent_id, + label=label, + actor=actor, + message=f"Delete {label} block", + ) + logger.info(f"Git commit for block delete: {commit.sha[:8]}") + + # 2. 
Delete from PostgreSQL cache + await self._delete_block_from_postgres(agent_id, label, actor) + else: + # Fall back to standard PostgreSQL-only behavior + await super().delete_block_async(block_id, actor) + + # ========================================================================= + # Git-specific methods + # ========================================================================= + + @enforce_types + @trace_method + async def enable_git_memory_for_agent( + self, + agent_id: str, + actor: PydanticUser, + ) -> None: + """Enable git-based memory for an agent. + + This: + 1. Adds the GIT_MEMORY_ENABLED_TAG to the agent + 2. Creates a git repo for the agent + 3. Commits current blocks as initial state + """ + if self.memory_repo_manager is None: + raise ValueError("Memory repo manager not configured") + + # If already enabled (tag exists), ensure the repo exists. + # + # This matters because tags can be added via the agent update endpoint. In that + # flow, the tag may be persisted before the git repo is created. We treat the + # tag as the source-of-truth "desired state" and backfill the repo if missing. + if await self._is_git_enabled_for_agent(agent_id, actor): + try: + # Fast check: does the repo exist in backing storage? 
+ await self.memory_repo_manager.git.get_head_sha(agent_id=agent_id, org_id=actor.organization_id) + + # Repo exists - check if all blocks are present + blocks = await self.get_blocks_by_agent_async(agent_id, actor) + repo_files = await self.memory_repo_manager.git.get_files(agent_id=agent_id, org_id=actor.organization_id, ref="HEAD") + + # Check which blocks are missing from repo + missing_blocks = [] + for block in blocks: + expected_path = f"{block.label}.md" + if expected_path not in repo_files: + missing_blocks.append(block) + + if missing_blocks: + logger.warning( + "Git memory repo exists but missing %d/%d blocks for agent %s; backfilling", + len(missing_blocks), + len(blocks), + agent_id, + ) + # Commit missing blocks + for block in missing_blocks: + await self.memory_repo_manager.update_block_async( + agent_id=agent_id, + label=block.label, + value=block.value or "", + actor=actor, + message=f"Backfill {block.label} block", + ) + logger.info(f"Backfilled {len(missing_blocks)} missing blocks for agent {agent_id}") + else: + logger.info(f"Git memory already enabled for agent {agent_id}") + return + except FileNotFoundError: + logger.warning( + "Git memory tag present but repo missing for agent %s; creating repo from current blocks", + agent_id, + ) + blocks = await self.get_blocks_by_agent_async(agent_id, actor) + # Ensure blocks have path-based labels before creating repo. + # All existing blocks were rendered in the system prompt, so they + # need the system/ prefix. Check startswith (not "/" presence) + # because labels like "letta/letta_town" contain "/" but aren't + # yet in the system/ namespace. 
+ for block in blocks: + if not block.label.startswith("system/"): + old_label = block.label + new_label = f"system/{block.label}" + async with db_registry.async_session() as session: + block_orm = await BlockModel.read_async(db_session=session, identifier=block.id, actor=actor) + block_orm.label = new_label + await session.commit() + block.label = new_label + logger.info(f"Transformed block label '{old_label}' -> '{new_label}' during backfill for agent {agent_id}") + await self.memory_repo_manager.create_repo_async( + agent_id=agent_id, + actor=actor, + initial_blocks=blocks, + ) + logger.info(f"Backfilled git repo for agent {agent_id} with {len(blocks)} blocks") + return + + # Get current blocks for this agent and transform labels to path-based. + # All existing blocks were in the system prompt, so they need the system/ prefix. + # Use startswith check (not "/" presence) because labels like "letta/letta_town" + # contain "/" but aren't yet in the system/ namespace. + blocks = await self.get_blocks_by_agent_async(agent_id, actor) + for block in blocks: + if not block.label.startswith("system/"): + old_label = block.label + new_label = f"system/{block.label}" + logger.info(f"Transforming block label '{old_label}' -> '{new_label}' for agent {agent_id}") + + # Rename in PostgreSQL directly + async with db_registry.async_session() as session: + block_orm = await BlockModel.read_async(db_session=session, identifier=block.id, actor=actor) + block_orm.label = new_label + await session.commit() + + block.label = new_label + + # Create git repo with path-based blocks + await self.memory_repo_manager.create_repo_async( + agent_id=agent_id, + actor=actor, + initial_blocks=blocks, + ) + + # Add the tag + async with db_registry.async_session() as session: + from letta.orm.agents_tags import AgentsTags + + tag = AgentsTags( + agent_id=agent_id, + tag=GIT_MEMORY_ENABLED_TAG, + ) + session.add(tag) + await session.commit() + + logger.info(f"Enabled git memory for agent {agent_id} 
with {len(blocks)} blocks") + + @enforce_types + @trace_method + async def disable_git_memory_for_agent( + self, + agent_id: str, + actor: PydanticUser, + ) -> None: + """Disable git-based memory for an agent. + + This removes the tag but keeps the git repo for historical reference. + """ + async with db_registry.async_session() as session: + from sqlalchemy import delete + + from letta.orm.agents_tags import AgentsTags + + await session.execute( + delete(AgentsTags).where( + AgentsTags.agent_id == agent_id, + AgentsTags.tag == GIT_MEMORY_ENABLED_TAG, + ) + ) + + logger.info(f"Disabled git memory for agent {agent_id}") + + @enforce_types + @trace_method + async def get_block_at_commit( + self, + agent_id: str, + label: str, + commit_sha: str, + actor: PydanticUser, + ) -> Optional[PydanticBlock]: + """Get a block's value at a specific commit. + + This is a git-only operation that reads from version history. + """ + if self.memory_repo_manager is None: + raise ValueError("Memory repo manager not configured") + + return await self.memory_repo_manager.get_block_async( + agent_id=agent_id, + label=label, + actor=actor, + ref=commit_sha, + ) + + @enforce_types + @trace_method + async def get_block_history( + self, + agent_id: str, + actor: PydanticUser, + label: Optional[str] = None, + limit: int = 50, + ): + """Get commit history for an agent's memory blocks. 
+ + Args: + agent_id: Agent ID + actor: User performing the operation + label: Optional block label to filter by + limit: Maximum commits to return + + Returns: + List of MemoryCommit objects + """ + if self.memory_repo_manager is None: + raise ValueError("Memory repo manager not configured") + + path = f"{label}.md" if label else None + return await self.memory_repo_manager.get_history_async( + agent_id=agent_id, + actor=actor, + path=path, + limit=limit, + ) + + @enforce_types + @trace_method + async def sync_blocks_from_git( + self, + agent_id: str, + actor: PydanticUser, + ) -> List[PydanticBlock]: + """Sync all blocks from git to PostgreSQL. + + Use this to rebuild the PostgreSQL cache from git source of truth. + """ + if self.memory_repo_manager is None: + raise ValueError("Memory repo manager not configured") + + # Get all blocks from git + git_blocks = await self.memory_repo_manager.get_blocks_async( + agent_id=agent_id, + actor=actor, + ) + + # Sync each to PostgreSQL + synced_blocks = [] + for block in git_blocks: + synced = await self._sync_block_to_postgres( + agent_id=agent_id, + label=block.label, + value=block.value, + actor=actor, + description=block.description, + limit=block.limit, + ) + synced_blocks.append(synced) + + logger.info(f"Synced {len(synced_blocks)} blocks from git for agent {agent_id}") + return synced_blocks diff --git a/letta/services/clickhouse_provider_traces.py b/letta/services/clickhouse_provider_traces.py index 52ed24b0..5a86dc5e 100644 --- a/letta/services/clickhouse_provider_traces.py +++ b/letta/services/clickhouse_provider_traces.py @@ -41,10 +41,10 @@ def _parse_clickhouse_endpoint(endpoint: str) -> tuple[str, int, bool]: @dataclass(frozen=True) class ClickhouseProviderTraceRow: created_at: Any - trace_id: str + id: str step_id: str - request_data: str | None - response_data: str | None + request_json: str | None + response_json: str | None @singleton @@ -87,15 +87,15 @@ class ClickhouseProviderTraceReader: client = 
self._get_client() query = """ SELECT - Timestamp AS created_at, - TraceId AS trace_id, - SpanAttributes['parameter.step_id'] AS step_id, - SpanAttributes['request_data'] AS request_data, - SpanAttributes['response_data'] AS response_data - FROM llm_provider_traces - WHERE SpanAttributes['parameter.step_id'] = %(step_id)s - AND position(SpanAttributes['parameter.actor'], %(org_match)s) > 0 - ORDER BY Timestamp DESC + created_at, + id, + step_id, + request_json, + response_json + FROM llm_traces + WHERE step_id = %(step_id)s + AND organization_id = %(organization_id)s + ORDER BY created_at DESC LIMIT 1 """ @@ -103,7 +103,7 @@ class ClickhouseProviderTraceReader: query, parameters={ "step_id": step_id, - "org_match": f"organization_id='{organization_id}'", + "organization_id": organization_id, }, ) @@ -111,13 +111,12 @@ class ClickhouseProviderTraceReader: return None row = result.result_rows[0] - # Order matches SELECT above return ClickhouseProviderTraceRow( created_at=row[0], - trace_id=row[1], + id=row[1], step_id=row[2], - request_data=row[3], - response_data=row[4], + request_json=row[3], + response_json=row[4], ) async def get_provider_trace_by_step_id_async(self, *, step_id: str, organization_id: str) -> ProviderTrace | None: @@ -126,9 +125,9 @@ class ClickhouseProviderTraceReader: return None return ProviderTrace( - id=f"provider_trace-{row.trace_id}", + id=f"provider_trace-{row.id}", step_id=row.step_id, - request_json=_parse_json_maybe(row.request_data), - response_json=_parse_json_maybe(row.response_data), + request_json=_parse_json_maybe(row.request_json), + response_json=_parse_json_maybe(row.response_json), created_at=row.created_at, ) diff --git a/letta/services/context_window_calculator/context_window_calculator.py b/letta/services/context_window_calculator/context_window_calculator.py index 80fe7659..cfa3afe6 100644 --- a/letta/services/context_window_calculator/context_window_calculator.py +++ 
b/letta/services/context_window_calculator/context_window_calculator.py @@ -1,5 +1,5 @@ import asyncio -from typing import Any, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple from openai.types.beta.function_tool import FunctionTool as OpenAITool @@ -20,45 +20,195 @@ class ContextWindowCalculator: """Handles context window calculations with different token counting strategies""" @staticmethod - def extract_system_components(system_message: str) -> Tuple[str, str, str]: + def _extract_tag_content(text: str, tag_name: str) -> Optional[str]: + """ + Extract content between XML-style opening and closing tags. + + Args: + text: The text to search in + tag_name: The name of the tag (without < >) + + Returns: + The content between tags (inclusive of tags), or None if not found + + Note: + If duplicate tags exist, only the first occurrence is extracted. + """ + start_tag = f"<{tag_name}>" + end_tag = f"</{tag_name}>" + + start_idx = text.find(start_tag) + if start_idx == -1: + return None + + end_idx = text.find(end_tag, start_idx) + if end_idx == -1: + return None + + return text[start_idx : end_idx + len(end_tag)] + + @staticmethod + def _extract_system_prompt(system_message: str) -> Optional[str]: + """ + Extract the system prompt / base instructions from a system message. + + First tries to find an explicit <base_instructions> tag. If not present + (e.g. custom system prompts from Letta Code agents), falls back to + extracting everything before the first known section tag. + + Returns: + The system prompt text, or None if the message is empty. + + Note: + The returned value is semantically different depending on agent type: + - Standard agents: includes the <base_instructions>...</base_instructions> tags + - Custom prompt agents (e.g.
Letta Code): raw preamble text without any tags + """ + _extract = ContextWindowCalculator._extract_tag_content + + # Preferred: explicit <base_instructions> wrapper + tagged = _extract(system_message, "base_instructions") + if tagged is not None: + return tagged + + # Fallback: everything before the first known section tag + section_tags = ["<memory_blocks>", "<memory_filesystem>", "<tool_usage_rules>", "<directories>", "<memory_metadata>"] + first_section_pos = len(system_message) + for tag in section_tags: + pos = system_message.find(tag) + if pos != -1 and pos < first_section_pos: + first_section_pos = pos + + prompt = system_message[:first_section_pos].strip() + return prompt if prompt else None + + @staticmethod + def _extract_top_level_tag(system_message: str, tag_name: str, container_tag: str = "memory_blocks") -> Optional[str]: + """ + Extract a tag only if it appears outside a container tag. + + This prevents extracting tags that are nested inside <memory_blocks> as + memory block labels (e.g. a block named "memory_filesystem" rendered as + <memory_filesystem> inside <memory_blocks>) from being confused with + top-level sections. + + Handles the case where a tag appears both nested (inside the container) + and at top-level — scans all occurrences to find one outside the container. + + Args: + system_message: The full system message text + tag_name: The tag to extract + container_tag: The container tag to check nesting against + + Returns: + The tag content if found at top level, None otherwise.
+ """ + _extract = ContextWindowCalculator._extract_tag_content + + start_tag = f"<{tag_name}>" + end_tag = f"</{tag_name}>" + + # Find the container boundaries + container_start = system_message.find(f"<{container_tag}>") + container_end = system_message.find(f"</{container_tag}>") + has_container = container_start != -1 and container_end != -1 + + # Scan all occurrences of the tag to find one outside the container + search_start = 0 + while True: + tag_start = system_message.find(start_tag, search_start) + if tag_start == -1: + return None + + # Check if this occurrence is nested inside the container + if has_container and container_start < tag_start < container_end: + # Skip past this nested occurrence + search_start = tag_start + len(start_tag) + continue + + # Found a top-level occurrence — extract it + tag_end = system_message.find(end_tag, tag_start) + if tag_end == -1: + return None + return system_message[tag_start : tag_end + len(end_tag)] + + @staticmethod + def _extract_git_core_memory(system_message: str) -> Optional[str]: + """ + Extract bare file blocks for git-enabled agents. + + Git-enabled agents render individual memory blocks as bare tags like + <label>...</label> WITHOUT any container tag. + These appear after </base_instructions> and before the next known + section tag (<tool_usage_rules>, <directories>, or <memory_metadata>). + + Returns: + The text containing all bare file blocks, or None if not found. + """ + end_marker = "</base_instructions>" + end_pos = system_message.find(end_marker) + if end_pos == -1: + return None + + start = end_pos + len(end_marker) + + # Find the next known section tag + next_section_tags = ["<tool_usage_rules>", "<directories>", "<memory_metadata>"] + next_section_pos = len(system_message) + for tag in next_section_tags: + pos = system_message.find(tag, start) + if pos != -1 and pos < next_section_pos: + next_section_pos = pos + + content = system_message[start:next_section_pos].strip() + return content if content else None + + @staticmethod + def extract_system_components(system_message: str) -> Dict[str, Optional[str]]: + """ + Extract structured components from a formatted system message.
- Parses the system message to extract three distinct sections marked by XML-style tags: - - base_instructions: The core system prompt and agent instructions - - memory_blocks: The agent's core memory (persistent context) - - memory_metadata: Metadata about external memory systems + Parses the system message to extract sections marked by XML-style tags using + proper end-tag matching. Handles all agent types including: + - Standard agents with <base_instructions> wrapper + - Custom system prompts without <base_instructions> (e.g. Letta Code agents) + - Git-enabled agents with top-level <memory_filesystem> and bare file blocks + - React/workflow agents that don't render <memory_blocks> Args: system_message: A formatted system message containing XML-style section markers Returns: - A tuple of (system_prompt, core_memory, external_memory_summary) - Each component will be an empty string if its section is not found - - Note: - This method assumes a specific format with sections delimited by: - <base_instructions>, <memory_blocks>, and <memory_metadata> tags. - The extraction is position-based and expects sections in this order. + A dictionary with the following keys (value is None if section not found): + - system_prompt: The base instructions section (or text before first section tag) + - core_memory: The memory blocks section. For standard agents this is the + <memory_blocks>...</memory_blocks> content. For git-enabled agents (no + <memory_blocks> but top-level <memory_filesystem>), this captures the bare + file blocks (e.g.
<label>) that follow </base_instructions>. + - memory_filesystem: Top-level memory filesystem (git-enabled agents only, NOT + the memory_filesystem block nested inside <memory_blocks>) + - tool_usage_rules: The tool usage rules section + - directories: The directories section (when sources are attached) + - external_memory_summary: The memory metadata section """ - base_start = system_message.find("<base_instructions>") - memory_blocks_start = system_message.find("<memory_blocks>") - metadata_start = system_message.find("<memory_metadata>") + _extract = ContextWindowCalculator._extract_tag_content + _extract_top = ContextWindowCalculator._extract_top_level_tag - system_prompt = "" - core_memory = "" - external_memory_summary = "" + core_memory = _extract(system_message, "memory_blocks") + memory_filesystem = _extract_top(system_message, "memory_filesystem") - if base_start != -1 and memory_blocks_start != -1: - system_prompt = system_message[base_start:memory_blocks_start].strip() + # Git-enabled agents: no <memory_blocks>, but bare file blocks after </base_instructions> + if core_memory is None and memory_filesystem is not None: + core_memory = ContextWindowCalculator._extract_git_core_memory(system_message) - if memory_blocks_start != -1 and metadata_start != -1: - core_memory = system_message[memory_blocks_start:metadata_start].strip() - - if metadata_start != -1: - external_memory_summary = system_message[metadata_start:].strip() - - return system_prompt, core_memory, external_memory_summary + return { + "system_prompt": ContextWindowCalculator._extract_system_prompt(system_message), + "core_memory": core_memory, + "memory_filesystem": memory_filesystem, + "tool_usage_rules": _extract_top(system_message, "tool_usage_rules"), + "directories": _extract_top(system_message, "directories"), + "external_memory_summary": _extract(system_message, "memory_metadata"), + } @staticmethod def extract_summary_memory(messages: List[Any]) -> Tuple[Optional[str], int]: @@ -116,7 +266,7 @@ class ContextWindowCalculator: # Use provided message_ids or fall back to agent_state.message_ids[1:] effective_message_ids =
message_ids if message_ids is not None else agent_state.message_ids[1:] messages = await message_manager.get_messages_by_ids_async(message_ids=effective_message_ids, actor=actor) - in_context_messages = [system_message_compiled] + messages + in_context_messages = [system_message_compiled, *messages] # Filter out None messages (can occur when system message is missing) original_count = len(in_context_messages) @@ -131,9 +281,14 @@ class ContextWindowCalculator: converted_messages = token_counter.convert_messages(in_context_messages) # Extract system components - system_prompt = "" - core_memory = "" - external_memory_summary = "" + components: Dict[str, Optional[str]] = { + "system_prompt": None, + "core_memory": None, + "memory_filesystem": None, + "tool_usage_rules": None, + "directories": None, + "external_memory_summary": None, + } if ( in_context_messages @@ -143,10 +298,15 @@ class ContextWindowCalculator: and isinstance(in_context_messages[0].content[0], TextContent) ): system_message = in_context_messages[0].content[0].text - system_prompt, core_memory, external_memory_summary = self.extract_system_components(system_message) + components = self.extract_system_components(system_message) - # System prompt - system_prompt = system_prompt or agent_state.system + # Extract each component with fallbacks + system_prompt = components.get("system_prompt") or agent_state.system or "" + core_memory = components.get("core_memory") or "" + memory_filesystem = components.get("memory_filesystem") or "" + tool_usage_rules = components.get("tool_usage_rules") or "" + directories = components.get("directories") or "" + external_memory_summary = components.get("external_memory_summary") or "" # Extract summary memory summary_memory, message_start_index = self.extract_summary_memory(in_context_messages) @@ -156,11 +316,14 @@ class ContextWindowCalculator: if agent_state.tools: available_functions_definitions = [OpenAITool(type="function", function=f.json_schema) for f in 
agent_state.tools] - # Count tokens concurrently + # Count tokens concurrently for all sections, skipping empty ones token_counts = await asyncio.gather( token_counter.count_text_tokens(system_prompt), - token_counter.count_text_tokens(core_memory), - token_counter.count_text_tokens(external_memory_summary), + token_counter.count_text_tokens(core_memory) if core_memory else asyncio.sleep(0, result=0), + token_counter.count_text_tokens(memory_filesystem) if memory_filesystem else asyncio.sleep(0, result=0), + token_counter.count_text_tokens(tool_usage_rules) if tool_usage_rules else asyncio.sleep(0, result=0), + token_counter.count_text_tokens(directories) if directories else asyncio.sleep(0, result=0), + token_counter.count_text_tokens(external_memory_summary) if external_memory_summary else asyncio.sleep(0, result=0), token_counter.count_text_tokens(summary_memory) if summary_memory else asyncio.sleep(0, result=0), ( token_counter.count_message_tokens(converted_messages[message_start_index:]) @@ -177,6 +340,9 @@ class ContextWindowCalculator: ( num_tokens_system, num_tokens_core_memory, + num_tokens_memory_filesystem, + num_tokens_tool_usage_rules, + num_tokens_directories, num_tokens_external_memory_summary, num_tokens_summary_memory, num_tokens_messages, @@ -200,6 +366,14 @@ class ContextWindowCalculator: system_prompt=system_prompt, num_tokens_core_memory=num_tokens_core_memory, core_memory=core_memory, + # New sections + num_tokens_memory_filesystem=num_tokens_memory_filesystem, + memory_filesystem=memory_filesystem if memory_filesystem else None, + num_tokens_tool_usage_rules=num_tokens_tool_usage_rules, + tool_usage_rules=tool_usage_rules if tool_usage_rules else None, + num_tokens_directories=num_tokens_directories, + directories=directories if directories else None, + # Summary and messages num_tokens_summary_memory=num_tokens_summary_memory, summary_memory=summary_memory, num_tokens_messages=num_tokens_messages, diff --git 
a/letta/services/context_window_calculator/token_counter.py b/letta/services/context_window_calculator/token_counter.py index 775ea804..7cbbd6a9 100644 --- a/letta/services/context_window_calculator/token_counter.py +++ b/letta/services/context_window_calculator/token_counter.py @@ -13,7 +13,6 @@ from letta.schemas.message import Message from letta.schemas.openai.chat_completion_request import Tool as OpenAITool if TYPE_CHECKING: - from letta.schemas.llm_config import LLMConfig from letta.schemas.user import User logger = get_logger(__name__) @@ -279,7 +278,7 @@ def create_token_counter( The appropriate TokenCounter instance """ from letta.llm_api.llm_client import LLMClient - from letta.settings import model_settings, settings + from letta.settings import settings # Use Gemini token counter for Google Vertex and Google AI use_gemini = model_endpoint_type in ("google_vertex", "google_ai") diff --git a/letta/services/conversation_manager.py b/letta/services/conversation_manager.py index 101598fb..3b95a2e6 100644 --- a/letta/services/conversation_manager.py +++ b/letta/services/conversation_manager.py @@ -4,7 +4,7 @@ if TYPE_CHECKING: pass # Import AgentState outside TYPE_CHECKING for @enforce_types decorator -from sqlalchemy import delete, func, select +from sqlalchemy import and_, asc, delete, desc, func, nulls_last, or_, select from letta.errors import LettaInvalidArgumentError from letta.orm.agent import Agent as AgentModel @@ -12,8 +12,8 @@ from letta.orm.block import Block as BlockModel from letta.orm.blocks_conversations import BlocksConversations from letta.orm.conversation import Conversation as ConversationModel from letta.orm.conversation_messages import ConversationMessage as ConversationMessageModel -from letta.orm.errors import NoResultFound from letta.orm.message import Message as MessageModel +from letta.orm.run import Run as RunModel from letta.otel.tracing import trace_method from letta.schemas.agent import AgentState from letta.schemas.block import 
Block as PydanticBlock @@ -22,6 +22,7 @@ from letta.schemas.letta_message import LettaMessage from letta.schemas.message import Message as PydanticMessage from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry +from letta.services.helpers.agent_manager_helper import validate_agent_exists_async from letta.utils import enforce_types @@ -48,10 +49,14 @@ class ConversationManager: The created conversation with isolated_block_ids if any were created """ async with db_registry.async_session() as session: + # Validate that the agent exists before creating the conversation + await validate_agent_exists_async(session, agent_id, actor) conversation = ConversationModel( agent_id=agent_id, summary=conversation_create.summary, organization_id=actor.organization_id, + model=conversation_create.model, + model_settings=conversation_create.model_settings.model_dump() if conversation_create.model_settings else None, ) await conversation.create_async(session, actor=actor) @@ -101,65 +106,153 @@ class ConversationManager: @trace_method async def list_conversations( self, - agent_id: str, + agent_id: Optional[str], actor: PydanticUser, limit: int = 50, after: Optional[str] = None, summary_search: Optional[str] = None, + ascending: bool = False, + sort_by: str = "created_at", ) -> List[PydanticConversation]: - """List conversations for an agent with cursor-based pagination. + """List conversations for an agent (or all conversations) with cursor-based pagination. 
Args: - agent_id: The agent ID to list conversations for + agent_id: The agent ID to list conversations for (optional - returns all if not provided) actor: The user performing the action limit: Maximum number of conversations to return after: Cursor for pagination (conversation ID) summary_search: Optional text to search for within the summary field + ascending: Sort order (True for oldest first, False for newest first) + sort_by: Field to sort by ("created_at" or "last_run_completion") Returns: List of conversations matching the criteria """ async with db_registry.async_session() as session: - # If summary search is provided, use custom query - if summary_search: - from sqlalchemy import and_ - - stmt = ( - select(ConversationModel) - .where( - and_( - ConversationModel.agent_id == agent_id, - ConversationModel.organization_id == actor.organization_id, - ConversationModel.summary.isnot(None), - ConversationModel.summary.contains(summary_search), - ) + # Build base query with optional join for last_run_completion + if sort_by == "last_run_completion": + # Subquery to get the latest completed_at for each conversation + latest_run_subquery = ( + select( + RunModel.conversation_id, + func.max(RunModel.completed_at).label("last_run_completion") ) - .order_by(ConversationModel.created_at.desc()) - .limit(limit) + .where(RunModel.conversation_id.isnot(None)) + .group_by(RunModel.conversation_id) + .subquery() ) - if after: - # Add cursor filtering + # Join conversations with the subquery + stmt = ( + select(ConversationModel) + .outerjoin( + latest_run_subquery, + ConversationModel.id == latest_run_subquery.c.conversation_id + ) + ) + sort_column = latest_run_subquery.c.last_run_completion + sort_nulls_last = True + else: + # Simple query for created_at + stmt = select(ConversationModel) + sort_column = ConversationModel.created_at + sort_nulls_last = False + + # Build where conditions + conditions = [ + ConversationModel.organization_id == actor.organization_id, + 
ConversationModel.is_deleted == False, + ] + + # Add agent_id filter if provided + if agent_id is not None: + conditions.append(ConversationModel.agent_id == agent_id) + + # Add summary search filter if provided + if summary_search: + conditions.extend([ + ConversationModel.summary.isnot(None), + ConversationModel.summary.contains(summary_search), + ]) + + stmt = stmt.where(and_(*conditions)) + + # Handle cursor pagination + if after: + # Get the sort value for the cursor conversation + if sort_by == "last_run_completion": + cursor_query = ( + select( + ConversationModel.id, + func.max(RunModel.completed_at).label("last_run_completion") + ) + .outerjoin(RunModel, ConversationModel.id == RunModel.conversation_id) + .where(ConversationModel.id == after) + .group_by(ConversationModel.id) + ) + result = (await session.execute(cursor_query)).first() + if result: + after_id, after_sort_value = result + # Apply cursor filter + if after_sort_value is None: + # Cursor is at NULL - if ascending, get non-NULLs or NULLs with greater ID + if ascending: + stmt = stmt.where( + or_( + and_(sort_column.is_(None), ConversationModel.id > after_id), + sort_column.isnot(None) + ) + ) + else: + # If descending, get NULLs with smaller ID + stmt = stmt.where( + and_(sort_column.is_(None), ConversationModel.id < after_id) + ) + else: + # Cursor is at non-NULL + if ascending: + # Moving forward: greater values or same value with greater ID + stmt = stmt.where( + and_( + sort_column.isnot(None), + or_( + sort_column > after_sort_value, + and_(sort_column == after_sort_value, ConversationModel.id > after_id) + ) + ) + ) + else: + # Moving backward: smaller values or NULLs or same value with smaller ID + stmt = stmt.where( + or_( + sort_column.is_(None), + sort_column < after_sort_value, + and_(sort_column == after_sort_value, ConversationModel.id < after_id) + ) + ) + else: + # Simple created_at cursor after_conv = await ConversationModel.read_async( db_session=session, identifier=after, 
actor=actor, ) - stmt = stmt.where(ConversationModel.created_at < after_conv.created_at) + if ascending: + stmt = stmt.where(ConversationModel.created_at > after_conv.created_at) + else: + stmt = stmt.where(ConversationModel.created_at < after_conv.created_at) - result = await session.execute(stmt) - conversations = result.scalars().all() - return [conv.to_pydantic() for conv in conversations] + # Apply ordering + order_fn = asc if ascending else desc + if sort_nulls_last: + stmt = stmt.order_by(nulls_last(order_fn(sort_column)), order_fn(ConversationModel.id)) + else: + stmt = stmt.order_by(order_fn(sort_column), order_fn(ConversationModel.id)) - # Use default list logic - conversations = await ConversationModel.list_async( - db_session=session, - actor=actor, - agent_id=agent_id, - limit=limit, - after=after, - ascending=False, - ) + stmt = stmt.limit(limit) + + result = await session.execute(stmt) + conversations = result.scalars().all() return [conv.to_pydantic() for conv in conversations] @enforce_types @@ -176,12 +269,17 @@ class ConversationManager: db_session=session, identifier=conversation_id, actor=actor, + check_is_deleted=True, ) # Set attributes on the model update_data = conversation_update.model_dump(exclude_none=True) for key, value in update_data.items(): - setattr(conversation, key, value) + # model_settings needs to be serialized to dict for the JSON column + if key == "model_settings" and value is not None: + setattr(conversation, key, conversation_update.model_settings.model_dump() if conversation_update.model_settings else value) + else: + setattr(conversation, key, value) # Commit the update updated_conversation = await conversation.update_async( @@ -203,6 +301,7 @@ class ConversationManager: db_session=session, identifier=conversation_id, actor=actor, + check_is_deleted=True, ) # Get isolated blocks before modifying conversation @@ -612,6 +711,7 @@ class ConversationManager: blocks=memory_blocks, file_blocks=agent_state.memory.file_blocks, 
agent_type=agent_state.memory.agent_type, + git_enabled=agent_state.memory.git_enabled, ) return agent_state diff --git a/letta/services/credit_verification_service.py b/letta/services/credit_verification_service.py new file mode 100644 index 00000000..c3644818 --- /dev/null +++ b/letta/services/credit_verification_service.py @@ -0,0 +1,72 @@ +import logging +import os + +import httpx + +from letta.errors import InsufficientCreditsError + +logger = logging.getLogger(__name__) + + +class CreditVerificationService: + """Service for verifying organization credit balance before agent execution.""" + + def __init__(self): + self.endpoint = os.getenv("STEP_ORCHESTRATOR_ENDPOINT") + self.auth_key = os.getenv("STEP_COMPLETE_KEY") + + async def verify_credits(self, organization_id: str, agent_id: str) -> bool: + """ + Check if an organization has enough credits to proceed with a specific agent. + + Args: + organization_id: The organization's core ID + agent_id: The agent's ID (used to determine model-specific costs) + + Returns True if credits are available or if the service is not configured. + Raises InsufficientCreditsError if no credits remain. + """ + + if not self.endpoint or not self.auth_key: + return True + + try: + headers = {} + if self.auth_key: + headers["Authorization"] = f"Bearer {self.auth_key}" + + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get( + f"{self.endpoint}/validate/core-organizations/{organization_id}/agents/{agent_id}", + headers=headers, + ) + response.raise_for_status() + + data = response.json() + if not data.get("hasMoreCredits", True): + # We need to test why this is firing in production. 
+ logger.error( + f"[CREDIT VERIFICATION] Insufficient credits would have fired for organization {organization_id} and agent {agent_id}" + ) + return True + + return True + + except InsufficientCreditsError: + logger.error( + f"[CREDIT VERIFICATION] Insufficient credits would have fired for organization {organization_id} and agent {agent_id}" + ) + return True + except httpx.TimeoutException: + logger.warning(f"[CREDIT VERIFICATION] Timeout verifying credits for organization {organization_id}, agent {agent_id}") + return True + except httpx.HTTPStatusError as e: + logger.warning( + f"[CREDIT VERIFICATION] HTTP error verifying credits for organization {organization_id}, agent {agent_id}: {e.response.status_code}" + ) + return True + except Exception as e: + logger.error( + f"[CREDIT VERIFICATION] Unexpected error verifying credits for organization {organization_id}, agent {agent_id}: {e}" + ) + return True diff --git a/letta/services/file_manager.py b/letta/services/file_manager.py index ee3db939..eeab2aca 100644 --- a/letta/services/file_manager.py +++ b/letta/services/file_manager.py @@ -1,4 +1,3 @@ -import asyncio import os from datetime import datetime, timedelta, timezone from typing import List, Optional @@ -40,7 +39,9 @@ class DuplicateFileError(Exception): class FileManager: """Manager class to handle business logic related to files.""" - async def _invalidate_file_caches(self, file_id: str, actor: PydanticUser, original_filename: str = None, source_id: str = None): + async def _invalidate_file_caches( + self, file_id: str, actor: PydanticUser, original_filename: str | None = None, source_id: str | None = None + ): """Invalidate all caches related to a file.""" # TEMPORARILY DISABLED - caching is disabled # # invalidate file content cache (all variants) @@ -701,7 +702,7 @@ class FileManager: async with db_registry.async_session() as session: # We need to import FileAgent here to avoid circular imports - from letta.orm.file_agent import FileAgent as 
FileAgentModel + from letta.orm.files_agents import FileAgent as FileAgentModel # Join through file-agent relationships query = ( diff --git a/letta/services/file_processor/chunker/llama_index_chunker.py b/letta/services/file_processor/chunker/llama_index_chunker.py index ab6ea4a6..f653b062 100644 --- a/letta/services/file_processor/chunker/llama_index_chunker.py +++ b/letta/services/file_processor/chunker/llama_index_chunker.py @@ -146,7 +146,9 @@ class LlamaIndexChunker: raise e # Raise the original error @trace_method - def default_chunk_text(self, content: Union[OCRPageObject, str], chunk_size: int = None, chunk_overlap: int = None) -> List[str]: + def default_chunk_text( + self, content: Union[OCRPageObject, str], chunk_size: int | None = None, chunk_overlap: int | None = None + ) -> List[str]: """Chunk text using default SentenceSplitter regardless of file type with conservative defaults""" try: from llama_index.core.node_parser import SentenceSplitter diff --git a/letta/services/file_processor/embedder/openai_embedder.py b/letta/services/file_processor/embedder/openai_embedder.py index 4f979e1f..743559d8 100644 --- a/letta/services/file_processor/embedder/openai_embedder.py +++ b/letta/services/file_processor/embedder/openai_embedder.py @@ -136,7 +136,7 @@ class OpenAIEmbedder(BaseEmbedder): ) # Extract just the chunk text and indices for processing - chunk_indices = [i for i, _ in valid_chunks] + [i for i, _ in valid_chunks] chunks_to_embed = [chunk for _, chunk in valid_chunks] embedding_start = time.time() diff --git a/letta/services/files_agents_manager.py b/letta/services/files_agents_manager.py index d45947a9..7cccc6d9 100644 --- a/letta/services/files_agents_manager.py +++ b/letta/services/files_agents_manager.py @@ -5,6 +5,7 @@ from sqlalchemy import and_, delete, func, or_, select, update from letta.log import get_logger from letta.orm.errors import NoResultFound +from letta.orm.file import FileMetadata as FileMetadataModel from 
letta.orm.files_agents import FileAgent as FileAgentModel from letta.otel.tracing import trace_method from letta.schemas.block import Block as PydanticBlock, FileBlock as PydanticFileBlock @@ -48,7 +49,7 @@ class FileAgentManager: """ if is_open: # Use the efficient LRU + open method - closed_files, was_already_open, _ = await self.enforce_max_open_files_and_open( + closed_files, _was_already_open, _ = await self.enforce_max_open_files_and_open( agent_id=agent_id, file_id=file_id, file_name=file_name, @@ -696,6 +697,20 @@ class FileAgentManager: closed_file_names.extend(new_names[max_files_open:]) evicted_ids = [r.file_id for r in currently_open if r.file_name in closed_file_names] + # validate file IDs exist to prevent FK violations (files may have been deleted) + requested_file_ids = {meta.id for meta in ordered_unique} + existing_file_ids_q = select(FileMetadataModel.id).where(FileMetadataModel.id.in_(requested_file_ids)) + existing_file_ids = set((await session.execute(existing_file_ids_q)).scalars().all()) + missing_file_ids = requested_file_ids - existing_file_ids + if missing_file_ids: + logger.warning( + "attach_files_bulk: skipping %d file(s) with missing records for agent %s: %s", + len(missing_file_ids), + agent_id, + missing_file_ids, + ) + ordered_unique = [m for m in ordered_unique if m.id in existing_file_ids] + # upsert requested files for meta in ordered_unique: is_now_open = meta.file_name in final_open_set diff --git a/letta/services/group_manager.py b/letta/services/group_manager.py index 4e2fd58b..1a570846 100644 --- a/letta/services/group_manager.py +++ b/letta/services/group_manager.py @@ -238,7 +238,7 @@ class GroupManager: async def reset_messages_async(self, group_id: str, actor: PydanticUser) -> None: async with db_registry.async_session() as session: # Ensure group is loadable by user - group = await GroupModel.read_async(db_session=session, identifier=group_id, actor=actor) + await GroupModel.read_async(db_session=session, 
identifier=group_id, actor=actor) # Delete all messages in the group delete_stmt = delete(MessageModel).where( diff --git a/letta/services/helpers/agent_manager_helper.py b/letta/services/helpers/agent_manager_helper.py index 1bcf6683..eb313905 100644 --- a/letta/services/helpers/agent_manager_helper.py +++ b/letta/services/helpers/agent_manager_helper.py @@ -369,25 +369,17 @@ def initialize_message_sequence( # Some LMStudio models (e.g. meta-llama-3.1) require the user message before any tool calls if llm_config.provider_name == "lmstudio_openai": - messages = ( - [ - {"role": "system", "content": full_system_message}, - ] - + [ - {"role": "user", "content": first_user_message}, - ] - + initial_boot_messages - ) + messages = [ + {"role": "system", "content": full_system_message}, + {"role": "user", "content": first_user_message}, + *initial_boot_messages, + ] else: - messages = ( - [ - {"role": "system", "content": full_system_message}, - ] - + initial_boot_messages - + [ - {"role": "user", "content": first_user_message}, - ] - ) + messages = [ + {"role": "system", "content": full_system_message}, + *initial_boot_messages, + {"role": "user", "content": first_user_message}, + ] else: messages = [ @@ -442,25 +434,17 @@ async def initialize_message_sequence_async( # Some LMStudio models (e.g. 
meta-llama-3.1) require the user message before any tool calls if llm_config.provider_name == "lmstudio_openai": - messages = ( - [ - {"role": "system", "content": full_system_message}, - ] - + [ - {"role": "user", "content": first_user_message}, - ] - + initial_boot_messages - ) + messages = [ + {"role": "system", "content": full_system_message}, + {"role": "user", "content": first_user_message}, + *initial_boot_messages, + ] else: - messages = ( - [ - {"role": "system", "content": full_system_message}, - ] - + initial_boot_messages - + [ - {"role": "user", "content": first_user_message}, - ] - ) + messages = [ + {"role": "system", "content": full_system_message}, + *initial_boot_messages, + {"role": "user", "content": first_user_message}, + ] else: messages = [ @@ -822,7 +806,7 @@ def get_column_names_from_includes_params( include_relationships: Optional[List[str]] = None, includes: Optional[List[str]] = None ) -> Set[str]: include_mapping = { - "agent.blocks": ["core_memory", "file_agents"], + "agent.blocks": ["core_memory", "file_agents", "tags"], "agent.identities": ["identities"], "agent.managed_group": ["multi_agent_group"], "agent.secrets": ["tool_exec_environment_variables"], @@ -830,7 +814,7 @@ def get_column_names_from_includes_params( "agent.tags": ["tags"], "agent.tools": ["tools"], # legacy - "memory": ["core_memory", "file_agents"], + "memory": ["core_memory", "file_agents", "tags"], "identity_ids": ["identities"], "multi_agent_group": ["multi_agent_group"], "tool_exec_environment_variables": ["tool_exec_environment_variables"], diff --git a/letta/services/identity_manager.py b/letta/services/identity_manager.py index fc416094..5c510408 100644 --- a/letta/services/identity_manager.py +++ b/letta/services/identity_manager.py @@ -1,4 +1,3 @@ -import asyncio from typing import List, Optional from fastapi import HTTPException diff --git a/letta/services/job_manager.py b/letta/services/job_manager.py index 0b39ece1..eaabce32 100644 --- 
a/letta/services/job_manager.py +++ b/letta/services/job_manager.py @@ -1,5 +1,4 @@ -from functools import partial, reduce -from operator import add +from functools import partial from typing import List, Literal, Optional, Union from httpx import AsyncClient, post @@ -10,9 +9,8 @@ from letta.helpers.datetime_helpers import get_utc_time from letta.log import get_logger from letta.orm.errors import NoResultFound from letta.orm.job import Job as JobModel -from letta.orm.message import Message as MessageModel from letta.orm.sqlalchemy_base import AccessType -from letta.orm.step import Step, Step as StepModel +from letta.orm.step import Step as StepModel from letta.otel.tracing import log_event, trace_method from letta.schemas.enums import JobStatus, JobType, MessageRole, PrimitiveType from letta.schemas.job import BatchJob as PydanticBatchJob, Job as PydanticJob, JobUpdate, LettaRequestConfig @@ -21,7 +19,6 @@ from letta.schemas.letta_stop_reason import StopReasonType from letta.schemas.message import Message as PydanticMessage from letta.schemas.run import Run as PydanticRun from letta.schemas.step import Step as PydanticStep -from letta.schemas.usage import LettaUsageStatistics from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.services.helpers.agent_manager_helper import validate_agent_exists_async diff --git a/letta/services/llm_trace_reader.py b/letta/services/llm_trace_reader.py new file mode 100644 index 00000000..10105e90 --- /dev/null +++ b/letta/services/llm_trace_reader.py @@ -0,0 +1,462 @@ +"""ClickHouse reader for LLM analytics traces. + +Reads LLM traces from ClickHouse for debugging, analytics, and auditing. 
+""" + +from __future__ import annotations + +import asyncio +from dataclasses import dataclass +from datetime import datetime +from typing import Any, List, Optional +from urllib.parse import urlparse + +from letta.helpers.singleton import singleton +from letta.log import get_logger +from letta.schemas.llm_trace import LLMTrace +from letta.settings import settings + +logger = get_logger(__name__) + + +def _parse_clickhouse_endpoint(endpoint: str) -> tuple[str, int, bool]: + """Return (host, port, secure) for clickhouse_connect.get_client. + + Supports: + - http://host:port -> (host, port, False) + - https://host:port -> (host, port, True) + - host:port -> (host, port, False) # Default to insecure for local dev + - host -> (host, 8123, False) # Default HTTP port, insecure + """ + parsed = urlparse(endpoint) + + if parsed.scheme in ("http", "https"): + host = parsed.hostname or "" + port = parsed.port or (8443 if parsed.scheme == "https" else 8123) + secure = parsed.scheme == "https" + return host, port, secure + + # Fallback: accept raw hostname (possibly with :port) + # Default to insecure (HTTP) for local development + if ":" in endpoint: + host, port_str = endpoint.rsplit(":", 1) + return host, int(port_str), False + + return endpoint, 8123, False + + +@dataclass(frozen=True) +class LLMTraceRow: + """Raw row from ClickHouse query.""" + + id: str + organization_id: str + project_id: str + agent_id: str + agent_tags: List[str] + run_id: str + step_id: str + trace_id: str + call_type: str + provider: str + model: str + is_byok: bool + request_size_bytes: int + response_size_bytes: int + prompt_tokens: int + completion_tokens: int + total_tokens: int + cached_input_tokens: Optional[int] + cache_write_tokens: Optional[int] + reasoning_tokens: Optional[int] + latency_ms: int + is_error: bool + error_type: str + error_message: str + request_json: str + response_json: str + llm_config_json: str + created_at: datetime + + +@singleton +class LLMTraceReader: + """ + 
ClickHouse reader for raw LLM traces. + + Provides query methods for debugging, analytics, and auditing. + + Usage: + reader = LLMTraceReader() + trace = await reader.get_by_step_id_async(step_id="step-xxx", organization_id="org-xxx") + traces = await reader.list_by_agent_async(agent_id="agent-xxx", organization_id="org-xxx") + """ + + def __init__(self): + self._client = None + + def _get_client(self): + """Initialize ClickHouse client on first use (lazy loading).""" + if self._client is not None: + return self._client + + import clickhouse_connect + + if not settings.clickhouse_endpoint: + raise ValueError("CLICKHOUSE_ENDPOINT is required") + + host, port, secure = _parse_clickhouse_endpoint(settings.clickhouse_endpoint) + if not host: + raise ValueError("Invalid CLICKHOUSE_ENDPOINT") + + database = settings.clickhouse_database or "otel" + username = settings.clickhouse_username or "default" + password = settings.clickhouse_password + if not password: + raise ValueError("CLICKHOUSE_PASSWORD is required") + + self._client = clickhouse_connect.get_client( + host=host, + port=port, + username=username, + password=password, + database=database, + secure=secure, + verify=True, + ) + return self._client + + def _row_to_trace(self, row: tuple) -> LLMTrace: + """Convert a ClickHouse row tuple to LLMTrace.""" + return LLMTrace( + id=row[0], + organization_id=row[1], + project_id=row[2] or None, + agent_id=row[3] or None, + agent_tags=list(row[4]) if row[4] else [], + run_id=row[5] or None, + step_id=row[6] or None, + trace_id=row[7] or None, + call_type=row[8], + provider=row[9], + model=row[10], + is_byok=bool(row[11]), + request_size_bytes=row[12], + response_size_bytes=row[13], + prompt_tokens=row[14], + completion_tokens=row[15], + total_tokens=row[16], + cached_input_tokens=row[17], + cache_write_tokens=row[18], + reasoning_tokens=row[19], + latency_ms=row[20], + is_error=bool(row[21]), + error_type=row[22] or None, + error_message=row[23] or None, + 
request_json=row[24], + response_json=row[25], + llm_config_json=row[26] or "", + created_at=row[27], + ) + + def _query_sync(self, query: str, parameters: dict[str, Any]) -> List[tuple]: + """Execute a query synchronously.""" + client = self._get_client() + result = client.query(query, parameters=parameters) + return result.result_rows if result else [] + + # ------------------------------------------------------------------------- + # Query Methods + # ------------------------------------------------------------------------- + + async def get_by_step_id_async( + self, + step_id: str, + organization_id: str, + ) -> Optional[LLMTrace]: + """ + Get the most recent trace for a step. + + Args: + step_id: The step ID to look up + organization_id: Organization ID for access control + + Returns: + LLMTrace if found, None otherwise + """ + query = """ + SELECT + id, organization_id, project_id, agent_id, agent_tags, run_id, step_id, trace_id, + call_type, provider, model, is_byok, + request_size_bytes, response_size_bytes, + prompt_tokens, completion_tokens, total_tokens, + cached_input_tokens, cache_write_tokens, reasoning_tokens, + latency_ms, + is_error, error_type, error_message, + request_json, response_json, llm_config_json, + created_at + FROM llm_traces + WHERE step_id = %(step_id)s + AND organization_id = %(organization_id)s + ORDER BY created_at DESC + LIMIT 1 + """ + + rows = await asyncio.to_thread( + self._query_sync, + query, + {"step_id": step_id, "organization_id": organization_id}, + ) + + if not rows: + return None + + return self._row_to_trace(rows[0]) + + async def get_by_id_async( + self, + trace_id: str, + organization_id: str, + ) -> Optional[LLMTrace]: + """ + Get a trace by its ID. 
+ + Args: + trace_id: The trace ID (UUID) + organization_id: Organization ID for access control + + Returns: + LLMTrace if found, None otherwise + """ + query = """ + SELECT + id, organization_id, project_id, agent_id, agent_tags, run_id, step_id, trace_id, + call_type, provider, model, is_byok, + request_size_bytes, response_size_bytes, + prompt_tokens, completion_tokens, total_tokens, + cached_input_tokens, cache_write_tokens, reasoning_tokens, + latency_ms, + is_error, error_type, error_message, + request_json, response_json, llm_config_json, + created_at + FROM llm_traces + WHERE id = %(trace_id)s + AND organization_id = %(organization_id)s + LIMIT 1 + """ + + rows = await asyncio.to_thread( + self._query_sync, + query, + {"trace_id": trace_id, "organization_id": organization_id}, + ) + + if not rows: + return None + + return self._row_to_trace(rows[0]) + + async def list_by_agent_async( + self, + agent_id: str, + organization_id: str, + limit: int = 100, + offset: int = 0, + call_type: Optional[str] = None, + is_error: Optional[bool] = None, + start_date: Optional[datetime] = None, + end_date: Optional[datetime] = None, + ) -> List[LLMTrace]: + """ + List traces for an agent with optional filters. 
+
+        Args:
+            agent_id: Agent ID to filter by
+            organization_id: Organization ID for access control
+            limit: Maximum number of results (default 100)
+            offset: Offset for pagination
+            call_type: Filter by call type ('agent_step', 'summarization')
+            is_error: Filter by error status
+            start_date: Filter by created_at >= start_date
+            end_date: Filter by created_at <= end_date
+
+        Returns:
+            List of LLMTrace objects
+        """
+        conditions = [
+            "agent_id = %(agent_id)s",
+            "organization_id = %(organization_id)s",
+        ]
+        params: dict[str, Any] = {
+            "agent_id": agent_id,
+            "organization_id": organization_id,
+            "limit": limit,
+            "offset": offset,
+        }
+
+        if call_type:
+            conditions.append("call_type = %(call_type)s")
+            params["call_type"] = call_type
+
+        if is_error is not None:
+            conditions.append("is_error = %(is_error)s")
+            params["is_error"] = 1 if is_error else 0
+
+        if start_date:
+            conditions.append("created_at >= %(start_date)s")
+            params["start_date"] = start_date
+
+        if end_date:
+            conditions.append("created_at <= %(end_date)s")
+            params["end_date"] = end_date
+
+        where_clause = " AND ".join(conditions)
+
+        query = f"""
+            SELECT
+                id, organization_id, project_id, agent_id, agent_tags, run_id, step_id, trace_id,
+                call_type, provider, model, is_byok,
+                request_size_bytes, response_size_bytes,
+                prompt_tokens, completion_tokens, total_tokens,
+                cached_input_tokens, cache_write_tokens, reasoning_tokens,
+                latency_ms,
+                is_error, error_type, error_message,
+                request_json, response_json, llm_config_json,
+                created_at
+            FROM llm_traces
+            WHERE {where_clause}
+            ORDER BY created_at DESC
+            LIMIT %(limit)s OFFSET %(offset)s
+        """
+
+        rows = await asyncio.to_thread(self._query_sync, query, params)
+        return [self._row_to_trace(row) for row in rows]
+
+    async def get_usage_stats_async(
+        self,
+        organization_id: str,
+        start_date: Optional[datetime] = None,
+        end_date: Optional[datetime] = None,
+        group_by: str = "model",  # 'model', 'agent_id', 'call_type', 'provider'
+    ) -> List[dict[str, Any]]:
+        """
+        Get aggregated usage statistics.
+
+        Args:
+            organization_id: Organization ID for access control
+            start_date: Filter by created_at >= start_date
+            end_date: Filter by created_at <= end_date
+            group_by: Field to group by ('model', 'agent_id', 'call_type', 'provider')
+
+        Returns:
+            List of aggregated stats dicts
+        """
+        valid_group_by = {"model", "agent_id", "call_type", "provider"}
+        if group_by not in valid_group_by:
+            raise ValueError(f"group_by must be one of {valid_group_by}")
+
+        conditions = ["organization_id = %(organization_id)s"]
+        params: dict[str, Any] = {"organization_id": organization_id}
+
+        if start_date:
+            conditions.append("created_at >= %(start_date)s")
+            params["start_date"] = start_date
+
+        if end_date:
+            conditions.append("created_at <= %(end_date)s")
+            params["end_date"] = end_date
+
+        where_clause = " AND ".join(conditions)
+
+        query = f"""
+            SELECT
+                {group_by},
+                count() as request_count,
+                sum(total_tokens) as total_tokens,
+                sum(prompt_tokens) as prompt_tokens,
+                sum(completion_tokens) as completion_tokens,
+                avg(latency_ms) as avg_latency_ms,
+                sum(request_size_bytes) as total_request_bytes,
+                sum(response_size_bytes) as total_response_bytes,
+                countIf(is_error = 1) as error_count
+            FROM llm_traces
+            WHERE {where_clause}
+            GROUP BY {group_by}
+            ORDER BY total_tokens DESC
+        """
+
+        rows = await asyncio.to_thread(self._query_sync, query, params)
+
+        return [
+            {
+                group_by: row[0],
+                "request_count": row[1],
+                "total_tokens": row[2],
+                "prompt_tokens": row[3],
+                "completion_tokens": row[4],
+                "avg_latency_ms": row[5],
+                "total_request_bytes": row[6],
+                "total_response_bytes": row[7],
+                "error_count": row[8],
+            }
+            for row in rows
+        ]
+
+    async def find_large_requests_async(
+        self,
+        organization_id: str,
+        min_size_bytes: int = 1_000_000,  # 1MB default
+        limit: int = 100,
+    ) -> List[LLMTrace]:
+        """
+        Find traces with large request payloads (for debugging).
+ + Args: + organization_id: Organization ID for access control + min_size_bytes: Minimum request size in bytes (default 1MB) + limit: Maximum number of results + + Returns: + List of LLMTrace objects with large requests + """ + query = """ + SELECT + id, organization_id, project_id, agent_id, agent_tags, run_id, step_id, trace_id, + call_type, provider, model, is_byok, + request_size_bytes, response_size_bytes, + prompt_tokens, completion_tokens, total_tokens, + cached_input_tokens, cache_write_tokens, reasoning_tokens, + latency_ms, + is_error, error_type, error_message, + request_json, response_json, llm_config_json, + created_at + FROM llm_traces + WHERE organization_id = %(organization_id)s + AND request_size_bytes >= %(min_size_bytes)s + ORDER BY request_size_bytes DESC + LIMIT %(limit)s + """ + + rows = await asyncio.to_thread( + self._query_sync, + query, + { + "organization_id": organization_id, + "min_size_bytes": min_size_bytes, + "limit": limit, + }, + ) + + return [self._row_to_trace(row) for row in rows] + + +# Module-level instance for easy access +_reader_instance: Optional[LLMTraceReader] = None + + +def get_llm_trace_reader() -> LLMTraceReader: + """Get the singleton LLMTraceReader instance.""" + global _reader_instance + if _reader_instance is None: + _reader_instance = LLMTraceReader() + return _reader_instance diff --git a/letta/services/llm_trace_writer.py b/letta/services/llm_trace_writer.py new file mode 100644 index 00000000..9e671d20 --- /dev/null +++ b/letta/services/llm_trace_writer.py @@ -0,0 +1,207 @@ +"""ClickHouse writer for LLM analytics traces. + +Writes LLM traces to ClickHouse with denormalized columns for cost analytics. +Uses ClickHouse's async_insert feature for server-side batching. 
+""" + +from __future__ import annotations + +import asyncio +import atexit +from typing import TYPE_CHECKING, Optional +from urllib.parse import urlparse + +from letta.helpers.singleton import singleton +from letta.log import get_logger +from letta.settings import settings + +if TYPE_CHECKING: + from letta.schemas.llm_trace import LLMTrace + +logger = get_logger(__name__) + +# Retry configuration +MAX_RETRIES = 3 +INITIAL_BACKOFF_SECONDS = 1.0 + +_background_tasks: set[asyncio.Task] = set() + + +def _parse_clickhouse_endpoint(endpoint: str) -> tuple[str, int, bool]: + """Return (host, port, secure) for clickhouse_connect.get_client. + + Supports: + - http://host:port -> (host, port, False) + - https://host:port -> (host, port, True) + - host:port -> (host, port, False) # Default to insecure for local dev + - host -> (host, 8123, False) # Default HTTP port, insecure + """ + parsed = urlparse(endpoint) + + if parsed.scheme in ("http", "https"): + host = parsed.hostname or "" + port = parsed.port or (8443 if parsed.scheme == "https" else 8123) + secure = parsed.scheme == "https" + return host, port, secure + + # Fallback: accept raw hostname (possibly with :port) + # Default to insecure (HTTP) for local development + if ":" in endpoint: + host, port_str = endpoint.rsplit(":", 1) + return host, int(port_str), False + + return endpoint, 8123, False + + +@singleton +class LLMTraceWriter: + """ + Direct ClickHouse writer for raw LLM traces. + + Uses ClickHouse's async_insert feature for server-side batching. + Each trace is inserted directly and ClickHouse handles batching + for optimal write performance. 
+ + Usage: + writer = LLMTraceWriter() + await writer.write_async(trace) + + Configuration (via settings): + - store_llm_traces: Enable/disable (default: False) + """ + + def __init__(self): + self._client = None + self._shutdown = False + self._write_lock = asyncio.Lock() # Serialize writes - clickhouse_connect isn't thread-safe + + # Check if ClickHouse is configured - if not, writing is disabled + self._enabled = bool(settings.clickhouse_endpoint and settings.clickhouse_password) + + # Register shutdown handler + atexit.register(self._sync_shutdown) + + def _get_client(self): + """Initialize ClickHouse client on first use (lazy loading). + + Configures async_insert with wait_for_async_insert=1 for reliable + server-side batching with acknowledgment. + """ + if self._client is not None: + return self._client + + # Import lazily so OSS users who never enable this don't pay import cost + import clickhouse_connect + + host, port, secure = _parse_clickhouse_endpoint(settings.clickhouse_endpoint) + database = settings.clickhouse_database or "otel" + username = settings.clickhouse_username or "default" + + self._client = clickhouse_connect.get_client( + host=host, + port=port, + username=username, + password=settings.clickhouse_password, + database=database, + secure=secure, + verify=True, + settings={ + # Enable server-side batching + "async_insert": 1, + # Wait for acknowledgment (reliable) + "wait_for_async_insert": 1, + # Flush after 1 second if batch not full + "async_insert_busy_timeout_ms": 1000, + }, + ) + logger.info(f"LLMTraceWriter: Connected to ClickHouse at {host}:{port}/{database} (async_insert enabled)") + return self._client + + async def write_async(self, trace: "LLMTrace") -> None: + """ + Write a trace to ClickHouse (fire-and-forget with retry). + + ClickHouse's async_insert handles batching server-side for optimal + write performance. This method retries on failure with exponential + backoff. 
+ + Args: + trace: The LLMTrace to write + """ + if not self._enabled or self._shutdown: + return + + try: + task = asyncio.create_task(self._write_with_retry(trace)) + _background_tasks.add(task) + task.add_done_callback(_background_tasks.discard) + except RuntimeError: + pass + + async def _write_with_retry(self, trace: "LLMTrace") -> None: + """Write a single trace with retry on failure.""" + from letta.schemas.llm_trace import LLMTrace + + for attempt in range(MAX_RETRIES): + try: + client = self._get_client() + row = trace.to_clickhouse_row() + columns = LLMTrace.clickhouse_columns() + + # Serialize writes - clickhouse_connect client isn't thread-safe + async with self._write_lock: + # Run synchronous insert in thread pool + await asyncio.to_thread( + client.insert, + "llm_traces", + [row], + column_names=columns, + ) + return # Success + + except Exception as e: + if attempt < MAX_RETRIES - 1: + backoff = INITIAL_BACKOFF_SECONDS * (2**attempt) + logger.warning(f"LLMTraceWriter: Retry {attempt + 1}/{MAX_RETRIES}, backoff {backoff}s: {e}") + await asyncio.sleep(backoff) + else: + logger.error(f"LLMTraceWriter: Dropping trace after {MAX_RETRIES} retries: {e}") + + async def shutdown_async(self) -> None: + """Gracefully shutdown the writer.""" + self._shutdown = True + + # Close client + if self._client: + try: + self._client.close() + except Exception as e: + logger.warning(f"LLMTraceWriter: Error closing client: {e}") + self._client = None + + logger.info("LLMTraceWriter: Shutdown complete") + + def _sync_shutdown(self) -> None: + """Synchronous shutdown handler for atexit.""" + if not self._enabled or self._shutdown: + return + + self._shutdown = True + + if self._client: + try: + self._client.close() + except Exception: + pass + + +# Module-level instance for easy access +_writer_instance: Optional[LLMTraceWriter] = None + + +def get_llm_trace_writer() -> LLMTraceWriter: + """Get the singleton LLMTraceWriter instance.""" + global _writer_instance + if 
_writer_instance is None:
+        _writer_instance = LLMTraceWriter()
+    return _writer_instance
diff --git a/letta/services/mcp/base_client.py b/letta/services/mcp/base_client.py
index d297e83f..c53b6f99 100644
--- a/letta/services/mcp/base_client.py
+++ b/letta/services/mcp/base_client.py
@@ -5,11 +5,37 @@
 from mcp import ClientSession, Tool as MCPTool
 from mcp.client.auth import OAuthClientProvider
 from mcp.types import TextContent
 
+from letta.errors import LettaMCPConnectionError
 from letta.functions.mcp_client.types import BaseServerConfig
 from letta.log import get_logger
 
 logger = get_logger(__name__)
 
+
+EXPECTED_MCP_TOOL_ERRORS = (
+    "McpError",
+    "ToolError",
+    "HTTPStatusError",
+    "ConnectError",
+    "ConnectTimeout",
+    "ReadTimeout",
+    "ReadError",
+    "RemoteProtocolError",
+    "LocalProtocolError",
+    "ConnectionError",
+    "SSLError",
+    "MaxRetryError",
+    "ProtocolError",
+    "BrokenResourceError",
+)
+
+
+def _log_mcp_tool_error(log: "logging.Logger", tool_name: str, exc: Exception) -> None:
+    exc_name = type(exc).__name__
+    if exc_name in EXPECTED_MCP_TOOL_ERRORS:
+        log.info(f"MCP tool '{tool_name}' execution failed ({exc_name}): {exc}")
+    else:
+        log.warning(f"MCP tool '{tool_name}' execution failed with unexpected error ({exc_name}): {exc}", exc_info=True)
+
 
 # TODO: Get rid of Async prefix on this class name once we deprecate old sync code
 class AsyncBaseMCPClient:
@@ -31,14 +57,12 @@
             await self._initialize_connection(self.server_config)
             await self.session.initialize()
             self.initialized = True
+        except LettaMCPConnectionError:
+            raise
         except ConnectionError as e:
-            # MCP connection failures are often due to user misconfiguration, not system errors
-            # Log at debug level to avoid triggering Sentry alerts for expected configuration issues
             logger.debug(f"MCP connection failed: {str(e)}")
-            raise e
+            raise LettaMCPConnectionError(message=str(e), server_name=getattr(self.server_config, "server_name", None)) from e
         except Exception as e:
-            # MCP
connection failures are often due to user misconfiguration, not system errors - # Log as warning for visibility in monitoring logger.warning( f"Connecting to MCP server failed. Please review your server config: {self.server_config.model_dump_json(indent=4)}. Error: {str(e)}" ) @@ -48,8 +72,9 @@ class AsyncBaseMCPClient: server_info = f"command '{self.server_config.command}'" else: server_info = f"server '{self.server_config.server_name}'" - raise ConnectionError( - f"Failed to connect to MCP {server_info}. Please check your configuration and ensure the server is accessible." + raise LettaMCPConnectionError( + message=f"Failed to connect to MCP {server_info}. Please check your configuration and ensure the server is accessible.", + server_name=getattr(self.server_config, "server_name", None), ) from e async def _initialize_connection(self, server_config: BaseServerConfig) -> None: @@ -81,13 +106,11 @@ class AsyncBaseMCPClient: try: result = await self.session.call_tool(tool_name, tool_args) except Exception as e: - # ToolError is raised by fastmcp for input validation errors (e.g., missing required properties) - # McpError is raised for other MCP-related errors - # Both are expected user-facing issues from external MCP servers - # Log at debug level to avoid triggering production alerts for expected failures - if e.__class__.__name__ in ("McpError", "ToolError"): - logger.debug(f"MCP tool '{tool_name}' execution failed: {str(e)}") - raise + exception_to_check = e + if hasattr(e, "exceptions") and e.exceptions and len(e.exceptions) == 1: + exception_to_check = e.exceptions[0] + _log_mcp_tool_error(logger, tool_name, exception_to_check) + return str(exception_to_check), False parsed_content = [] for content_piece in result.content: diff --git a/letta/services/mcp/fastmcp_client.py b/letta/services/mcp/fastmcp_client.py index e3c901de..2a2b05c2 100644 --- a/letta/services/mcp/fastmcp_client.py +++ b/letta/services/mcp/fastmcp_client.py @@ -16,8 +16,10 @@ from fastmcp 
import Client from fastmcp.client.transports import SSETransport, StreamableHttpTransport from mcp import Tool as MCPTool +from letta.errors import LettaMCPConnectionError from letta.functions.mcp_client.types import SSEServerConfig, StreamableHTTPServerConfig from letta.log import get_logger +from letta.services.mcp.base_client import _log_mcp_tool_error from letta.services.mcp.server_side_oauth import ServerSideOAuth logger = get_logger(__name__) @@ -76,22 +78,24 @@ class AsyncFastMCPSSEClient: await self.client._connect() self.initialized = True except httpx.HTTPStatusError as e: - # Re-raise HTTP status errors for OAuth flow handling if e.response.status_code == 401: - raise ConnectionError("401 Unauthorized") from e - raise ConnectionError(f"HTTP error connecting to MCP server at {self.server_config.server_url}: {e}") from e - except ConnectionError: - # Re-raise ConnectionError as-is + raise LettaMCPConnectionError(message="401 Unauthorized", server_name=self.server_config.server_name) from e + raise LettaMCPConnectionError( + message=f"HTTP error connecting to MCP server at {self.server_config.server_url}: {e}", + server_name=self.server_config.server_name, + ) from e + except LettaMCPConnectionError: raise + except ConnectionError as e: + raise LettaMCPConnectionError(message=str(e), server_name=self.server_config.server_name) from e except Exception as e: - # MCP connection failures are often due to user misconfiguration, not system errors - # Log as warning for visibility in monitoring logger.warning( f"Connecting to MCP server failed. Please review your server config: {self.server_config.model_dump_json(indent=4)}. Error: {str(e)}" ) - raise ConnectionError( - f"Failed to connect to MCP server at '{self.server_config.server_url}'. " - f"Please check your configuration and ensure the server is accessible. Error: {str(e)}" + raise LettaMCPConnectionError( + message=f"Failed to connect to MCP server at '{self.server_config.server_url}'. 
" + f"Please check your configuration and ensure the server is accessible. Error: {str(e)}", + server_name=self.server_config.server_name, ) from e async def list_tools(self, serialize: bool = False) -> List[MCPTool]: @@ -139,13 +143,11 @@ class AsyncFastMCPSSEClient: try: result = await self.client.call_tool(tool_name, tool_args) except Exception as e: - # ToolError is raised by fastmcp for input validation errors (e.g., missing required properties) - # McpError is raised for other MCP-related errors - # Both are expected user-facing issues from external MCP servers - # Log at debug level to avoid triggering production alerts for expected failures - if e.__class__.__name__ in ("McpError", "ToolError"): - logger.debug(f"MCP tool '{tool_name}' execution failed: {str(e)}") - raise + exception_to_check = e + if hasattr(e, "exceptions") and e.exceptions and len(e.exceptions) == 1: + exception_to_check = e.exceptions[0] + _log_mcp_tool_error(logger, tool_name, exception_to_check) + return str(exception_to_check), False # Parse content from result parsed_content = [] @@ -233,22 +235,24 @@ class AsyncFastMCPStreamableHTTPClient: await self.client._connect() self.initialized = True except httpx.HTTPStatusError as e: - # Re-raise HTTP status errors for OAuth flow handling if e.response.status_code == 401: - raise ConnectionError("401 Unauthorized") from e - raise ConnectionError(f"HTTP error connecting to MCP server at {self.server_config.server_url}: {e}") from e - except ConnectionError: - # Re-raise ConnectionError as-is + raise LettaMCPConnectionError(message="401 Unauthorized", server_name=self.server_config.server_name) from e + raise LettaMCPConnectionError( + message=f"HTTP error connecting to MCP server at {self.server_config.server_url}: {e}", + server_name=self.server_config.server_name, + ) from e + except LettaMCPConnectionError: raise + except ConnectionError as e: + raise LettaMCPConnectionError(message=str(e), server_name=self.server_config.server_name) from 
e except Exception as e: - # MCP connection failures are often due to user misconfiguration, not system errors - # Log as warning for visibility in monitoring logger.warning( f"Connecting to MCP server failed. Please review your server config: {self.server_config.model_dump_json(indent=4)}. Error: {str(e)}" ) - raise ConnectionError( - f"Failed to connect to MCP server at '{self.server_config.server_url}'. " - f"Please check your configuration and ensure the server is accessible. Error: {str(e)}" + raise LettaMCPConnectionError( + message=f"Failed to connect to MCP server at '{self.server_config.server_url}'. " + f"Please check your configuration and ensure the server is accessible. Error: {str(e)}", + server_name=self.server_config.server_name, ) from e async def list_tools(self, serialize: bool = False) -> List[MCPTool]: @@ -296,13 +300,11 @@ class AsyncFastMCPStreamableHTTPClient: try: result = await self.client.call_tool(tool_name, tool_args) except Exception as e: - # ToolError is raised by fastmcp for input validation errors (e.g., missing required properties) - # McpError is raised for other MCP-related errors - # Both are expected user-facing issues from external MCP servers - # Log at debug level to avoid triggering production alerts for expected failures - if e.__class__.__name__ in ("McpError", "ToolError"): - logger.debug(f"MCP tool '{tool_name}' execution failed: {str(e)}") - raise + exception_to_check = e + if hasattr(e, "exceptions") and e.exceptions and len(e.exceptions) == 1: + exception_to_check = e.exceptions[0] + _log_mcp_tool_error(logger, tool_name, exception_to_check) + return str(exception_to_check), False # Parse content from result parsed_content = [] diff --git a/letta/services/mcp/oauth_utils.py b/letta/services/mcp/oauth_utils.py index 52599008..32114db5 100644 --- a/letta/services/mcp/oauth_utils.py +++ b/letta/services/mcp/oauth_utils.py @@ -4,7 +4,6 @@ import asyncio import json import secrets import time -import uuid from datetime 
import datetime, timedelta from typing import TYPE_CHECKING, Callable, Optional, Tuple @@ -94,16 +93,20 @@ class DatabaseTokenStorage(TokenStorage): class MCPOAuthSession: """Legacy OAuth session class - deprecated, use mcp_manager directly.""" - def __init__(self, server_url: str, server_name: str, user_id: Optional[str], organization_id: str): + def __init__( + self, + session_id: str, + server_url: Optional[str] = None, + server_name: Optional[str] = None, + user_id: Optional[str] = None, + organization_id: Optional[str] = None, + ): + self.session_id = session_id self.server_url = server_url self.server_name = server_name self.user_id = user_id self.organization_id = organization_id - self.session_id = str(uuid.uuid4()) - self.state = secrets.token_urlsafe(32) - - def __init__(self, session_id: str): - self.session_id = session_id + self.state = secrets.token_urlsafe(32) if server_url else None # TODO: consolidate / deprecate this in favor of mcp_manager access async def create_session(self) -> str: diff --git a/letta/services/mcp/sse_client.py b/letta/services/mcp/sse_client.py index ee8dfc17..1c4660df 100644 --- a/letta/services/mcp/sse_client.py +++ b/letta/services/mcp/sse_client.py @@ -37,7 +37,9 @@ class AsyncSSEMCPClient(AsyncBaseMCPClient): # Pass timeout to prevent httpx.ReadTimeout errors on slow connections timeout = tool_settings.mcp_connect_to_server_timeout if self.oauth_provider: - sse_cm = sse_client(url=server_config.server_url, headers=headers if headers else None, auth=self.oauth_provider, timeout=timeout) + sse_cm = sse_client( + url=server_config.server_url, headers=headers if headers else None, auth=self.oauth_provider, timeout=timeout + ) else: sse_cm = sse_client(url=server_config.server_url, headers=headers if headers else None, timeout=timeout) diff --git a/letta/services/mcp_manager.py b/letta/services/mcp_manager.py index 0f2ad4a8..b22db1eb 100644 --- a/letta/services/mcp_manager.py +++ b/letta/services/mcp_manager.py @@ -7,7 +7,7 @@ 
from datetime import datetime, timedelta from typing import Any, Dict, List, Optional, Tuple, Union from fastapi import HTTPException -from sqlalchemy import delete, desc, null, select +from sqlalchemy import delete, desc, select from starlette.requests import Request import letta.constants as constants @@ -48,7 +48,7 @@ from letta.services.mcp.server_side_oauth import ServerSideOAuth from letta.services.mcp.sse_client import MCP_CONFIG_TOPLEVEL_KEY from letta.services.mcp.stdio_client import AsyncStdioMCPClient from letta.services.tool_manager import ToolManager -from letta.settings import settings, tool_settings +from letta.settings import tool_settings from letta.utils import enforce_types, printd, safe_create_task_with_return from letta.validators import raise_on_invalid_id @@ -403,7 +403,7 @@ class MCPManager: # context manager now handles commits # await session.commit() return mcp_server.to_pydantic() - except Exception as e: + except Exception: await session.rollback() raise @@ -483,7 +483,6 @@ class MCPManager: 2. Attempts to connect and fetch tools 3. 
Persists valid tools in parallel (best-effort) """ - import asyncio # First, create the MCP server created_server = await self.create_mcp_server(pydantic_mcp_server, actor) @@ -1194,7 +1193,7 @@ class MCPManager: # Give the OAuth flow time to connect to the MCP server and store the authorization URL timeout = 0 - while not auth_session or not auth_session.authorization_url and not connect_task.done() and timeout < 10: + while not auth_session or (not auth_session.authorization_url and not connect_task.done() and timeout < 10): timeout += 1 auth_session = await self.get_oauth_session_by_id(session_id, actor) await asyncio.sleep(1.0) diff --git a/letta/services/mcp_server_manager.py b/letta/services/mcp_server_manager.py index f1981a03..70cbe651 100644 --- a/letta/services/mcp_server_manager.py +++ b/letta/services/mcp_server_manager.py @@ -46,7 +46,7 @@ from letta.services.mcp.server_side_oauth import ServerSideOAuth from letta.services.mcp.sse_client import MCP_CONFIG_TOPLEVEL_KEY from letta.services.mcp.stdio_client import AsyncStdioMCPClient from letta.services.tool_manager import ToolManager -from letta.settings import settings, tool_settings +from letta.settings import tool_settings from letta.utils import enforce_types, printd, safe_create_task logger = get_logger(__name__) @@ -500,7 +500,7 @@ class MCPServerManager: # context manager now handles commits # await session.commit() return mcp_server.to_pydantic() - except Exception as e: + except Exception: await session.rollback() raise @@ -607,7 +607,6 @@ class MCPServerManager: 2. Attempts to connect and fetch tools 3. 
Persists valid tools in parallel (best-effort) """ - import asyncio # First, create the MCP server created_server = await self.create_mcp_server(pydantic_mcp_server, actor) diff --git a/letta/services/memory_repo/__init__.py b/letta/services/memory_repo/__init__.py new file mode 100644 index 00000000..a669ecce --- /dev/null +++ b/letta/services/memory_repo/__init__.py @@ -0,0 +1,16 @@ +"""Git-based memory repository services.""" + +from letta.services.memory_repo.storage.base import StorageBackend +from letta.services.memory_repo.storage.local import LocalStorageBackend + +# MemfsClient: try cloud implementation first, fall back to local filesystem +try: + from letta.services.memory_repo.memfs_client import MemfsClient +except ImportError: + from letta.services.memory_repo.memfs_client_base import MemfsClient + +__all__ = [ + "LocalStorageBackend", + "MemfsClient", + "StorageBackend", +] diff --git a/letta/services/memory_repo/block_markdown.py b/letta/services/memory_repo/block_markdown.py new file mode 100644 index 00000000..3c9d3e3a --- /dev/null +++ b/letta/services/memory_repo/block_markdown.py @@ -0,0 +1,195 @@ +"""Serialize and parse block data as Markdown with YAML frontmatter. + +File format: + --- + description: "Who I am and how I approach work" + limit: 20000 + --- + My name is Memo. I'm a stateful coding assistant... + +- Frontmatter fields are only rendered when they differ from defaults. +- Files without frontmatter are treated as value-only (backward compat). 
+""" + +from typing import Any, Dict, Optional + +import yaml + +from letta.schemas.block import BaseBlock + + +def _get_field_default(field_name: str) -> Any: + """Get the default value for a BaseBlock field.""" + field = BaseBlock.model_fields[field_name] + return field.default + + +def serialize_block( + value: str, + *, + description: Optional[str] = None, + limit: Optional[int] = None, + read_only: bool = False, + metadata: Optional[dict] = None, +) -> str: + """Serialize a block to Markdown with optional YAML frontmatter. + + This is used for initial file creation. For updates to existing files, + prefer `merge_frontmatter_with_body` to preserve user formatting. + """ + # description and limit are always included in frontmatter. + # read_only and metadata are only included when non-default. + front: Dict[str, Any] = {} + + front["description"] = description + front["limit"] = limit if limit is not None else _get_field_default("limit") + + if read_only != _get_field_default("read_only"): + front["read_only"] = read_only + if metadata and metadata != _get_field_default("metadata"): + front["metadata"] = metadata + + # Use block style for cleaner YAML, default_flow_style=False + yaml_str = yaml.dump(front, default_flow_style=False, sort_keys=False, allow_unicode=True).rstrip("\n") + return f"---\n{yaml_str}\n---\n{value}" + + +def _extract_frontmatter(content: str) -> tuple[Optional[str], str]: + """Return (frontmatter_yaml, body). + + If no valid opening/closing frontmatter delimiters are found, returns + (None, original_content). 
+ """ + if not content.startswith("---\n"): + return None, content + + end_idx = content.find("\n---\n", 4) + if end_idx == -1: + return None, content + + yaml_str = content[4:end_idx] + body = content[end_idx + 5 :] + return yaml_str, body + + +def merge_frontmatter_with_body( + existing_content: str, + *, + value: str, + description: Optional[str], + limit: Optional[int], + read_only: bool, + metadata: Optional[dict], +) -> str: + """Update block content while preserving existing frontmatter formatting when possible. + + Behavior: + - If existing content has YAML frontmatter, parse it and update keys in-memory, + then splice back using the exact original YAML text when values are unchanged. + - If keys changed or missing, emit normalized frontmatter only for changed keys, + while preserving body exactly as provided. + - If no frontmatter exists, create one. + """ + yaml_str, _existing_body = _extract_frontmatter(existing_content) + + if yaml_str is None: + return serialize_block( + value=value, + description=description, + limit=limit, + read_only=read_only, + metadata=metadata, + ) + + try: + parsed = yaml.safe_load(yaml_str) or {} + except yaml.YAMLError: + parsed = {} + + if not isinstance(parsed, dict): + parsed = {} + + # Desired values + desired_description = description + desired_limit = limit if limit is not None else _get_field_default("limit") + desired_read_only = read_only + desired_metadata = metadata if metadata is not None else _get_field_default("metadata") + + # Track whether anything semantically changes in frontmatter. 
+ changed = False + + if "description" not in parsed or parsed.get("description") != desired_description: + parsed["description"] = desired_description + changed = True + + if "limit" not in parsed or parsed.get("limit") != desired_limit: + parsed["limit"] = desired_limit + changed = True + + if desired_read_only != _get_field_default("read_only"): + if parsed.get("read_only") != desired_read_only: + parsed["read_only"] = desired_read_only + changed = True + elif "read_only" in parsed: + del parsed["read_only"] + changed = True + + if desired_metadata and desired_metadata != _get_field_default("metadata"): + if parsed.get("metadata") != desired_metadata: + parsed["metadata"] = desired_metadata + changed = True + elif "metadata" in parsed: + del parsed["metadata"] + changed = True + + # If frontmatter semantics unchanged, preserve original YAML formatting verbatim. + if not changed: + return f"---\n{yaml_str}\n---\n{value}" + + normalized_yaml = yaml.dump(parsed, default_flow_style=False, sort_keys=False, allow_unicode=True).rstrip("\n") + return f"---\n{normalized_yaml}\n---\n{value}" + + +def parse_block_markdown(content: str) -> Dict[str, Any]: + """Parse a Markdown file into block fields. + + Returns a dict with: + - "value": the body content after frontmatter + - "description", "limit", "read_only", "metadata": from frontmatter (if present) + + If no frontmatter is detected, the entire content is treated as the value + (backward compat with old repos that stored raw values). 
+ """ + if not content.startswith("---\n"): + return {"value": content} + + # Find the closing --- delimiter + end_idx = content.find("\n---\n", 4) + if end_idx == -1: + # No closing delimiter — treat entire content as value + return {"value": content} + + yaml_str = content[4:end_idx] + body = content[end_idx + 5 :] # skip past \n---\n + + try: + front = yaml.safe_load(yaml_str) + except yaml.YAMLError: + # Malformed YAML — treat entire content as value + return {"value": content} + + if not isinstance(front, dict): + return {"value": content} + + result: Dict[str, Any] = {"value": body} + + if "description" in front: + result["description"] = front["description"] + if "limit" in front: + result["limit"] = front["limit"] + if "read_only" in front: + result["read_only"] = front["read_only"] + if "metadata" in front: + result["metadata"] = front["metadata"] + + return result diff --git a/letta/services/memory_repo/git_operations.py b/letta/services/memory_repo/git_operations.py new file mode 100644 index 00000000..710ff428 --- /dev/null +++ b/letta/services/memory_repo/git_operations.py @@ -0,0 +1,638 @@ +"""Git operations for memory repositories using git CLI. + +This module provides high-level operations for working with git repos +stored in object storage (GCS/S3), using the git command-line tool +instead of dulwich for better compatibility and maintenance. +""" + +import asyncio +import os +import shutil +import subprocess +import tempfile +import time +import uuid +from datetime import datetime, timezone +from typing import Dict, List, Optional + +from letta.data_sources.redis_client import get_redis_client +from letta.log import get_logger +from letta.schemas.memory_repo import FileChange, MemoryCommit +from letta.services.memory_repo.storage.base import StorageBackend + +logger = get_logger(__name__) + + +def _run_git(args: List[str], cwd: str, check: bool = True) -> subprocess.CompletedProcess: + """Run a git command and return the result. 
class GitOperations:
    """High-level git operations for memory repositories.

    This class provides git operations that work with repositories
    stored in object storage. It downloads the repo to a temp directory,
    performs operations, and uploads the changes back.

    For efficiency with small repos (100s of files), we use a full
    checkout model. For larger repos, we could optimize to work with
    packfiles directly.

    Requirements:
        git CLI must be installed and available in PATH
    """

    def __init__(self, storage: StorageBackend, redis_client=None):
        """Initialize git operations.

        Args:
            storage: Storage backend for repo persistence
            redis_client: Optional pre-built redis client used for commit
                locking. When None (the default), a shared client is
                obtained lazily via get_redis_client() at commit time.
                Accepting this keyword keeps the constructor compatible
                with callers (e.g. MemfsClient) that pass redis_client
                explicitly; previously that call raised TypeError.
        """
        self.storage = storage
        self._redis_client = redis_client
        # Tri-state cache: None = not checked yet, True/False = check result.
        self._git_available = None

    def _check_git(self) -> None:
        """Check that git is available, caching the result across calls."""
        if self._git_available is None:
            try:
                result = subprocess.run(
                    ["git", "--version"],
                    capture_output=True,
                    text=True,
                    check=True,
                )
                self._git_available = True
                logger.debug(f"Git available: {result.stdout.strip()}")
            except (subprocess.CalledProcessError, FileNotFoundError):
                self._git_available = False
                raise RuntimeError("git CLI is required for git operations but was not found in PATH")
        elif not self._git_available:
            raise RuntimeError("git CLI is required for git operations but was not found in PATH")

    def _repo_path(self, agent_id: str, org_id: str) -> str:
        """Get the storage path for an agent's repo."""
        return f"{org_id}/{agent_id}/repo.git"

    async def create_repo(
        self,
        agent_id: str,
        org_id: str,
        initial_files: Optional[Dict[str, str]] = None,
        author_name: str = "Letta System",
        author_email: str = "system@letta.ai",
    ) -> str:
        """Create a new git repository for an agent.

        Args:
            agent_id: Agent ID
            org_id: Organization ID
            initial_files: Optional initial files to commit
            author_name: Author name for initial commit
            author_email: Author email for initial commit

        Returns:
            Initial commit SHA
        """
        self._check_git()

        def _create():
            temp_dir = tempfile.mkdtemp(prefix="letta-memrepo-")
            try:
                repo_path = os.path.join(temp_dir, "repo")
                os.makedirs(repo_path)

                # Initialize a new repository with main as default branch
                _run_git(["init", "-b", "main"], cwd=repo_path)

                # Configure user for this repo
                _run_git(["config", "user.name", author_name], cwd=repo_path)
                _run_git(["config", "user.email", author_email], cwd=repo_path)

                # Add initial files if provided
                if initial_files:
                    for file_path, content in initial_files.items():
                        full_path = os.path.join(repo_path, file_path)
                        os.makedirs(os.path.dirname(full_path), exist_ok=True)
                        with open(full_path, "w", encoding="utf-8") as f:
                            f.write(content)
                        _run_git(["add", file_path], cwd=repo_path)
                else:
                    # Create an empty .letta directory to initialize —
                    # git cannot commit an empty tree without a tracked file.
                    letta_dir = os.path.join(repo_path, ".letta")
                    os.makedirs(letta_dir, exist_ok=True)
                    config_path = os.path.join(letta_dir, "config.json")
                    with open(config_path, "w") as f:
                        f.write('{"version": 1}')
                    _run_git(["add", ".letta/config.json"], cwd=repo_path)

                # Create initial commit
                _run_git(["commit", "-m", "Initial commit"], cwd=repo_path)

                # Get commit SHA
                result = _run_git(["rev-parse", "HEAD"], cwd=repo_path)
                commit_sha = result.stdout.strip()

                return repo_path, commit_sha
            except Exception:
                shutil.rmtree(temp_dir, ignore_errors=True)
                raise

        repo_path, commit_sha = await asyncio.to_thread(_create)

        try:
            await self._upload_repo(repo_path, agent_id, org_id)
            return commit_sha
        finally:
            shutil.rmtree(os.path.dirname(repo_path), ignore_errors=True)

    async def _upload_repo(self, local_repo_path: str, agent_id: str, org_id: str) -> None:
        """Upload a local repo to storage (full upload of .git contents)."""
        t_start = time.perf_counter()
        storage_prefix = self._repo_path(agent_id, org_id)

        git_dir = os.path.join(local_repo_path, ".git")
        upload_tasks = []
        total_bytes = 0

        t0 = time.perf_counter()
        for root, dirs, files in os.walk(git_dir):
            for filename in files:
                local_path = os.path.join(root, filename)
                rel_path = os.path.relpath(local_path, git_dir)
                storage_path = f"{storage_prefix}/{rel_path}"

                with open(local_path, "rb") as f:
                    content = f.read()

                total_bytes += len(content)
                upload_tasks.append((storage_path, content))
        read_time = (time.perf_counter() - t0) * 1000
        logger.info(f"[GIT_PERF] _upload_repo read files took {read_time:.2f}ms files={len(upload_tasks)}")

        t0 = time.perf_counter()
        await asyncio.gather(*[self.storage.upload_bytes(path, content) for path, content in upload_tasks])
        upload_time = (time.perf_counter() - t0) * 1000

        total_time = (time.perf_counter() - t_start) * 1000
        logger.info(
            f"[GIT_PERF] _upload_repo TOTAL {total_time:.2f}ms "
            f"files={len(upload_tasks)} bytes={total_bytes} "
            f"upload_time={upload_time:.2f}ms"
        )

    @staticmethod
    def _snapshot_git_files(git_dir: str) -> Dict[str, float]:
        """Snapshot mtime of all files under .git/ for delta detection."""
        snapshot = {}
        for root, _dirs, files in os.walk(git_dir):
            for filename in files:
                path = os.path.join(root, filename)
                snapshot[path] = os.path.getmtime(path)
        return snapshot

    async def _upload_delta(
        self,
        local_repo_path: str,
        agent_id: str,
        org_id: str,
        before_snapshot: Dict[str, float],
    ) -> None:
        """Upload only new/modified files since before_snapshot.

        NOTE(review): change detection is mtime-based; on filesystems with
        coarse mtime granularity a rewrite within the same tick could be
        missed — confirm acceptable for the target platforms.
        """
        t_start = time.perf_counter()
        storage_prefix = self._repo_path(agent_id, org_id)
        git_dir = os.path.join(local_repo_path, ".git")

        upload_tasks = []
        total_bytes = 0

        for root, _dirs, files in os.walk(git_dir):
            for filename in files:
                local_path = os.path.join(root, filename)
                old_mtime = before_snapshot.get(local_path)
                if old_mtime is None or os.path.getmtime(local_path) != old_mtime:
                    rel_path = os.path.relpath(local_path, git_dir)
                    storage_path = f"{storage_prefix}/{rel_path}"
                    with open(local_path, "rb") as f:
                        content = f.read()
                    total_bytes += len(content)
                    upload_tasks.append((storage_path, content))

        t0 = time.perf_counter()
        await asyncio.gather(*[self.storage.upload_bytes(path, content) for path, content in upload_tasks])
        upload_time = (time.perf_counter() - t0) * 1000

        total_time = (time.perf_counter() - t_start) * 1000
        logger.info(
            f"[GIT_PERF] _upload_delta TOTAL {total_time:.2f}ms "
            f"files={len(upload_tasks)} bytes={total_bytes} "
            f"upload_time={upload_time:.2f}ms"
        )

    async def _download_repo(self, agent_id: str, org_id: str) -> str:
        """Download a repo from storage to a temp directory.

        Returns:
            Path to the temporary repo directory

        Raises:
            FileNotFoundError: If no repository exists for the agent.
        """
        t_start = time.perf_counter()
        storage_prefix = self._repo_path(agent_id, org_id)

        t0 = time.perf_counter()
        files = await self.storage.list_files(storage_prefix)
        list_time = (time.perf_counter() - t0) * 1000
        logger.info(f"[GIT_PERF] _download_repo storage.list_files took {list_time:.2f}ms files_count={len(files)}")

        if not files:
            raise FileNotFoundError(f"No repository found for agent {agent_id}")

        t0 = time.perf_counter()
        temp_dir = tempfile.mkdtemp(prefix="letta-memrepo-")
        repo_path = os.path.join(temp_dir, "repo")
        git_dir = os.path.join(repo_path, ".git")
        os.makedirs(git_dir)
        mkdir_time = (time.perf_counter() - t0) * 1000
        logger.info(f"[GIT_PERF] _download_repo tempdir creation took {mkdir_time:.2f}ms path={temp_dir}")

        file_info = []
        for file_path in files:
            if file_path.startswith(storage_prefix):
                rel_path = file_path[len(storage_prefix) + 1 :]
            else:
                rel_path = file_path.split("/")[-1] if "/" in file_path else file_path

            local_path = os.path.join(git_dir, rel_path)
            os.makedirs(os.path.dirname(local_path), exist_ok=True)
            file_info.append((file_path, local_path))

        t0 = time.perf_counter()
        download_tasks = [self.storage.download_bytes(fp) for fp, _ in file_info]
        contents = await asyncio.gather(*download_tasks)
        download_time = (time.perf_counter() - t0) * 1000
        total_bytes = sum(len(c) for c in contents)
        logger.info(f"[GIT_PERF] _download_repo parallel download took {download_time:.2f}ms files={len(files)} bytes={total_bytes}")

        t0 = time.perf_counter()
        for (_, local_path), content in zip(file_info, contents):
            with open(local_path, "wb") as f:
                f.write(content)
        write_time = (time.perf_counter() - t0) * 1000

        total_time = (time.perf_counter() - t_start) * 1000
        logger.info(
            f"[GIT_PERF] _download_repo TOTAL {total_time:.2f}ms "
            f"files={len(files)} bytes={total_bytes} "
            f"download_time={download_time:.2f}ms write_time={write_time:.2f}ms"
        )

        return repo_path

    async def get_files(
        self,
        agent_id: str,
        org_id: str,
        ref: str = "HEAD",
    ) -> Dict[str, str]:
        """Get all files at a specific ref.

        Args:
            agent_id: Agent ID
            org_id: Organization ID
            ref: Git ref (commit SHA, branch name, or 'HEAD')

        Returns:
            Dict mapping file paths to content
        """
        self._check_git()
        repo_path = await self._download_repo(agent_id, org_id)

        try:

            def _get_files():
                # List all files tracked by git at the given ref
                result = _run_git(["ls-tree", "-r", "--name-only", ref], cwd=repo_path)
                file_paths = result.stdout.strip().split("\n") if result.stdout.strip() else []

                files = {}
                for file_path in file_paths:
                    if not file_path:
                        continue
                    # Get file content at ref
                    try:
                        content_result = _run_git(["show", f"{ref}:{file_path}"], cwd=repo_path)
                        files[file_path] = content_result.stdout
                    except subprocess.CalledProcessError:
                        pass  # Skip files that can't be read

                return files

            return await asyncio.to_thread(_get_files)
        finally:
            shutil.rmtree(os.path.dirname(repo_path), ignore_errors=True)

    async def commit(
        self,
        agent_id: str,
        org_id: str,
        changes: List[FileChange],
        message: str,
        author_name: str = "Letta Agent",
        author_email: str = "agent@letta.ai",
        branch: str = "main",
    ) -> MemoryCommit:
        """Commit changes to the repository.

        Uses a Redis lock to prevent concurrent modifications.

        Args:
            agent_id: Agent ID
            org_id: Organization ID
            changes: List of file changes
            message: Commit message
            author_name: Author name
            author_email: Author email
            branch: Branch to commit to

        Returns:
            MemoryCommit with commit details

        Raises:
            MemoryRepoBusyError: If another operation is in progress
        """
        t_start = time.perf_counter()
        logger.info(f"[GIT_PERF] GitOperations.commit START agent={agent_id} changes={len(changes)}")

        t0 = time.perf_counter()
        # Prefer the injected client (constructor arg) over the shared singleton.
        redis_client = self._redis_client if self._redis_client is not None else await get_redis_client()
        lock_token = f"commit:{uuid.uuid4().hex}"
        lock = await redis_client.acquire_memory_repo_lock(agent_id, lock_token)
        logger.info(f"[GIT_PERF] acquire_memory_repo_lock took {(time.perf_counter() - t0) * 1000:.2f}ms")

        try:
            t0 = time.perf_counter()
            result = await self._commit_with_lock(
                agent_id=agent_id,
                org_id=org_id,
                changes=changes,
                message=message,
                author_name=author_name,
                author_email=author_email,
                branch=branch,
            )
            logger.info(f"[GIT_PERF] _commit_with_lock took {(time.perf_counter() - t0) * 1000:.2f}ms")

            total_time = (time.perf_counter() - t_start) * 1000
            logger.info(f"[GIT_PERF] GitOperations.commit TOTAL {total_time:.2f}ms")
            return result
        finally:
            t0 = time.perf_counter()
            if lock:
                try:
                    await lock.release()
                except Exception as e:
                    logger.warning(f"Failed to release lock for agent {agent_id}: {e}")
            # Best-effort fallback release by agent id (covers a stale lock object).
            await redis_client.release_memory_repo_lock(agent_id)
            logger.info(f"[GIT_PERF] lock release took {(time.perf_counter() - t0) * 1000:.2f}ms")

    async def _commit_with_lock(
        self,
        agent_id: str,
        org_id: str,
        changes: List[FileChange],
        message: str,
        author_name: str = "Letta Agent",
        author_email: str = "agent@letta.ai",
        branch: str = "main",
    ) -> MemoryCommit:
        """Internal commit implementation (called while holding lock)."""
        t_start = time.perf_counter()
        self._check_git()

        t0 = time.perf_counter()
        repo_path = await self._download_repo(agent_id, org_id)
        download_time = (time.perf_counter() - t0) * 1000
        logger.info(f"[GIT_PERF] _commit_with_lock download phase took {download_time:.2f}ms")

        try:
            git_dir = os.path.join(repo_path, ".git")
            before_snapshot = self._snapshot_git_files(git_dir)

            def _commit():
                t_git_start = time.perf_counter()

                # Configure user for this repo
                _run_git(["config", "user.name", author_name], cwd=repo_path)
                _run_git(["config", "user.email", author_email], cwd=repo_path)

                # Reset to clean state
                t0_reset = time.perf_counter()
                _run_git(["reset", "--hard"], cwd=repo_path)
                reset_time = (time.perf_counter() - t0_reset) * 1000

                # Get parent SHA before making changes
                try:
                    parent_result = _run_git(["rev-parse", "HEAD"], cwd=repo_path, check=False)
                    parent_sha = parent_result.stdout.strip() if parent_result.returncode == 0 else None
                except Exception:
                    parent_sha = None

                # Apply changes. additions/deletions are counted in
                # characters (len of string content), not lines.
                files_changed = []
                additions = 0
                deletions = 0
                apply_time = 0

                for change in changes:
                    t0_apply = time.perf_counter()
                    file_path = change.path.lstrip("/")
                    full_path = os.path.join(repo_path, file_path)

                    if change.change_type == "delete" or change.content is None:
                        if os.path.exists(full_path):
                            with open(full_path, "r") as f:
                                deletions += len(f.read())
                            os.remove(full_path)
                            _run_git(["rm", "-f", file_path], cwd=repo_path, check=False)
                    else:
                        os.makedirs(os.path.dirname(full_path), exist_ok=True)

                        if os.path.exists(full_path):
                            with open(full_path, "r") as f:
                                old_content = f.read()
                            deletions += len(old_content)
                        additions += len(change.content)

                        with open(full_path, "w", encoding="utf-8") as f:
                            f.write(change.content)
                        _run_git(["add", file_path], cwd=repo_path)

                    files_changed.append(file_path)
                    apply_time += (time.perf_counter() - t0_apply) * 1000

                # Create commit
                t0_commit = time.perf_counter()
                _run_git(["commit", "-m", message], cwd=repo_path)
                commit_time = (time.perf_counter() - t0_commit) * 1000

                # Get new commit SHA
                result = _run_git(["rev-parse", "HEAD"], cwd=repo_path)
                sha_str = result.stdout.strip()

                git_total = (time.perf_counter() - t_git_start) * 1000
                logger.info(
                    f"[GIT_PERF] _commit git operations: reset={reset_time:.2f}ms "
                    f"apply_changes={apply_time:.2f}ms commit={commit_time:.2f}ms total={git_total:.2f}ms"
                )

                return MemoryCommit(
                    sha=sha_str,
                    parent_sha=parent_sha,
                    message=message,
                    # Heuristic: classify author by email — TODO confirm this
                    # matches how callers set author_email.
                    author_type="agent" if "agent" in author_email.lower() else "user",
                    author_id=agent_id,
                    author_name=author_name,
                    timestamp=datetime.now(timezone.utc),
                    files_changed=files_changed,
                    additions=additions,
                    deletions=deletions,
                )

            t0 = time.perf_counter()
            commit = await asyncio.to_thread(_commit)
            git_thread_time = (time.perf_counter() - t0) * 1000
            logger.info(f"[GIT_PERF] _commit_with_lock git thread took {git_thread_time:.2f}ms")

            t0 = time.perf_counter()
            await self._upload_delta(repo_path, agent_id, org_id, before_snapshot)
            upload_time = (time.perf_counter() - t0) * 1000
            logger.info(f"[GIT_PERF] _commit_with_lock upload phase (delta) took {upload_time:.2f}ms")

            total_time = (time.perf_counter() - t_start) * 1000
            logger.info(
                f"[GIT_PERF] _commit_with_lock TOTAL {total_time:.2f}ms "
                f"(download={download_time:.2f}ms git={git_thread_time:.2f}ms upload={upload_time:.2f}ms)"
            )

            return commit
        finally:
            t0 = time.perf_counter()
            shutil.rmtree(os.path.dirname(repo_path), ignore_errors=True)
            logger.info(f"[GIT_PERF] cleanup temp dir took {(time.perf_counter() - t0) * 1000:.2f}ms")

    async def get_history(
        self,
        agent_id: str,
        org_id: str,
        path: Optional[str] = None,
        limit: int = 50,
    ) -> List[MemoryCommit]:
        """Get commit history.

        Args:
            agent_id: Agent ID
            org_id: Organization ID
            path: Optional file path to filter by
            limit: Maximum number of commits to return

        Returns:
            List of commits, newest first
        """
        self._check_git()
        repo_path = await self._download_repo(agent_id, org_id)

        try:

            def _get_history():
                # Use git log with custom format for easy parsing
                # Format: SHA|parent_sha|author_name|timestamp|message
                format_str = "%H|%P|%an|%at|%s"
                args = ["log", f"--format={format_str}", f"-n{limit}"]
                if path:
                    args.extend(["--", path])

                result = _run_git(args, cwd=repo_path)
                lines = result.stdout.strip().split("\n") if result.stdout.strip() else []

                commits = []
                for line in lines:
                    if not line:
                        continue
                    # maxsplit=4 keeps '|' characters inside the message intact
                    parts = line.split("|", 4)
                    if len(parts) < 5:
                        continue

                    sha, parents, author_name, timestamp_str, message = parts
                    parent_sha = parents.split()[0] if parents else None

                    commits.append(
                        MemoryCommit(
                            sha=sha,
                            parent_sha=parent_sha,
                            message=message,
                            author_type="system",
                            author_id="",
                            author_name=author_name,
                            timestamp=datetime.fromtimestamp(int(timestamp_str), tz=timezone.utc),
                            files_changed=[],
                            additions=0,
                            deletions=0,
                        )
                    )

                return commits

            return await asyncio.to_thread(_get_history)
        finally:
            shutil.rmtree(os.path.dirname(repo_path), ignore_errors=True)

    async def get_head_sha(self, agent_id: str, org_id: str) -> str:
        """Get the current HEAD commit SHA.

        Args:
            agent_id: Agent ID
            org_id: Organization ID

        Returns:
            HEAD commit SHA
        """
        self._check_git()
        repo_path = await self._download_repo(agent_id, org_id)

        try:

            def _get_head():
                result = _run_git(["rev-parse", "HEAD"], cwd=repo_path)
                return result.stdout.strip()

            return await asyncio.to_thread(_get_head)
        finally:
            shutil.rmtree(os.path.dirname(repo_path), ignore_errors=True)

    async def delete_repo(self, agent_id: str, org_id: str) -> None:
        """Delete an agent's repository from storage.

        Args:
            agent_id: Agent ID
            org_id: Organization ID
        """
        storage_prefix = self._repo_path(agent_id, org_id)
        await self.storage.delete_prefix(storage_prefix)
        logger.info(f"Deleted repository for agent {agent_id}")
+ """ + + def __init__(self, base_url: str | None = None, local_path: str | None = None, timeout: float = 120.0): + """Initialize the local memfs client. + + Args: + base_url: Ignored (for interface compatibility with cloud client) + local_path: Path for local storage (default: ~/.letta/memfs) + timeout: Ignored (for interface compatibility) + """ + self.local_path = local_path or DEFAULT_LOCAL_PATH + self.storage = LocalStorageBackend(base_path=self.local_path) + self.git = GitOperations(storage=self.storage, redis_client=None) + + logger.info(f"MemfsClient initialized with local storage at {self.local_path}") + + async def close(self): + """Close the client (no-op for local storage).""" + pass + + # ========================================================================= + # Repository Operations + # ========================================================================= + + @enforce_types + @trace_method + async def create_repo_async( + self, + agent_id: str, + actor: PydanticUser, + initial_blocks: List[PydanticBlock] | None = None, + ) -> str: + """Create a new repository for an agent with optional initial blocks. 
    # =========================================================================
    # Repository Operations
    # =========================================================================

    @enforce_types
    @trace_method
    async def create_repo_async(
        self,
        agent_id: str,
        actor: PydanticUser,
        initial_blocks: List[PydanticBlock] | None = None,
    ) -> str:
        """Create a new repository for an agent with optional initial blocks.

        Args:
            agent_id: Agent ID
            actor: User performing the operation
            initial_blocks: Optional list of blocks to commit as initial state

        Returns:
            The HEAD SHA of the created repository
        """
        initial_blocks = initial_blocks or []
        org_id = actor.organization_id

        # Build initial files from blocks (frontmatter embeds metadata).
        # Each block becomes a "{label}.md" file at the repo root.
        initial_files = {}

        for block in initial_blocks:
            file_path = f"{block.label}.md"
            initial_files[file_path] = serialize_block(
                value=block.value or "",
                description=block.description,
                limit=block.limit,
                read_only=block.read_only,
                metadata=block.metadata,
            )

        return await self.git.create_repo(
            agent_id=agent_id,
            org_id=org_id,
            initial_files=initial_files,
            author_name=f"User {actor.id}",
            author_email=f"{actor.id}@letta.ai",
        )

    # =========================================================================
    # Block Operations (Read)
    # =========================================================================

    @enforce_types
    @trace_method
    async def get_blocks_async(
        self,
        agent_id: str,
        actor: PydanticUser,
        ref: str = "HEAD",
    ) -> List[PydanticBlock]:
        """Get all memory blocks at a specific ref.

        Args:
            agent_id: Agent ID
            actor: User performing the operation
            ref: Git ref (commit SHA, branch name, or 'HEAD')

        Returns:
            List of memory blocks (empty list if the repo does not exist)
        """
        org_id = actor.organization_id

        try:
            files = await self.git.get_files(agent_id, org_id, ref)
        except FileNotFoundError:
            return []

        # Convert block files to PydanticBlock (metadata is in frontmatter)
        blocks = []
        for file_path, content in files.items():
            if file_path.endswith(".md"):
                label = file_path[:-3]

                parsed = parse_block_markdown(content)

                # Deterministic synthetic ID derived from (agent_id, label) so
                # repeated reads return a stable id; this is not a DB-backed
                # block id.
                synthetic_uuid = uuid.UUID(hashlib.md5(f"{agent_id}:{label}".encode()).hexdigest())
                blocks.append(
                    PydanticBlock(
                        id=f"block-{synthetic_uuid}",
                        label=label,
                        value=parsed["value"],
                        description=parsed.get("description"),
                        limit=parsed.get("limit", CORE_MEMORY_BLOCK_CHAR_LIMIT),
                        read_only=parsed.get("read_only", False),
                        metadata=parsed.get("metadata", {}),
                    )
                )

        return blocks

    @enforce_types
    @trace_method
    async def get_block_async(
        self,
        agent_id: str,
        label: str,
        actor: PydanticUser,
        ref: str = "HEAD",
    ) -> Optional[PydanticBlock]:
        """Get a specific memory block.

        Args:
            agent_id: Agent ID
            label: Block label
            actor: User performing the operation
            ref: Git ref

        Returns:
            Memory block or None
        """
        blocks = await self.get_blocks_async(agent_id, actor, ref)
        for block in blocks:
            if block.label == label:
                return block
        return None

    # =========================================================================
    # Block Operations (Write)
    # =========================================================================

    async def _ensure_repo_exists(self, agent_id: str, actor: PydanticUser) -> str:
        """Ensure the repository exists, creating (empty) if needed."""
        try:
            return await self.git.get_head_sha(agent_id, actor.organization_id)
        except FileNotFoundError:
            return await self.git.create_repo(
                agent_id=agent_id,
                org_id=actor.organization_id,
                initial_files={},
                author_name=f"User {actor.id}",
                author_email=f"{actor.id}@letta.ai",
            )

    @enforce_types
    @trace_method
    async def update_block_async(
        self,
        agent_id: str,
        label: str,
        value: str,
        actor: PydanticUser,
        message: Optional[str] = None,
        *,
        description: Optional[str] = None,
        limit: Optional[int] = None,
        read_only: bool = False,
        metadata: Optional[dict] = None,
    ) -> MemoryCommit:
        """Update a memory block.

        Args:
            agent_id: Agent ID
            label: Block label
            value: New block value
            actor: User performing the operation
            message: Optional commit message
            description: Block description (for frontmatter)
            limit: Block character limit (for frontmatter)
            read_only: Block read-only flag (for frontmatter)
            metadata: Block metadata dict (for frontmatter)

        Returns:
            Commit details
        """
        # Imported locally — presumably to avoid a circular import; TODO confirm.
        from letta.schemas.memory_repo import FileChange

        await self._ensure_repo_exists(agent_id, actor)

        file_path = f"{label}.md"
        file_content = serialize_block(
            value=value,
            description=description,
            limit=limit,
            read_only=read_only,
            metadata=metadata,
        )
        commit_message = message or f"Update {label}"

        return await self.git.commit(
            agent_id=agent_id,
            org_id=actor.organization_id,
            changes=[FileChange(path=file_path, content=file_content, change_type="modify")],
            message=commit_message,
            author_name=f"User {actor.id}",
            author_email=f"{actor.id}@letta.ai",
        )

    @enforce_types
    @trace_method
    async def create_block_async(
        self,
        agent_id: str,
        block: PydanticBlock,
        actor: PydanticUser,
        message: Optional[str] = None,
    ) -> MemoryCommit:
        """Create a new memory block.

        Args:
            agent_id: Agent ID
            block: Block to create
            actor: User performing the operation
            message: Optional commit message

        Returns:
            Commit details
        """
        # Imported locally — presumably to avoid a circular import; TODO confirm.
        from letta.schemas.memory_repo import FileChange

        await self._ensure_repo_exists(agent_id, actor)
        org_id = actor.organization_id

        file_content = serialize_block(
            value=block.value or "",
            description=block.description,
            limit=block.limit,
            read_only=block.read_only,
            metadata=block.metadata,
        )

        changes = [
            FileChange(
                path=f"{block.label}.md",
                content=file_content,
                change_type="add",
            ),
        ]

        commit_message = message or f"Create block {block.label}"

        return await self.git.commit(
            agent_id=agent_id,
            org_id=org_id,
            changes=changes,
            message=commit_message,
            author_name=f"User {actor.id}",
            author_email=f"{actor.id}@letta.ai",
        )

    @enforce_types
    @trace_method
    async def delete_block_async(
        self,
        agent_id: str,
        label: str,
        actor: PydanticUser,
        message: Optional[str] = None,
    ) -> MemoryCommit:
        """Delete a memory block.

        Args:
            agent_id: Agent ID
            label: Block label to delete
            actor: User performing the operation
            message: Optional commit message

        Returns:
            Commit details
        """
        # Imported locally — presumably to avoid a circular import; TODO confirm.
        from letta.schemas.memory_repo import FileChange

        await self._ensure_repo_exists(agent_id, actor)
        org_id = actor.organization_id

        changes = [
            FileChange(
                path=f"{label}.md",
                content=None,
                change_type="delete",
            ),
        ]

        commit_message = message or f"Delete block {label}"

        return await self.git.commit(
            agent_id=agent_id,
            org_id=org_id,
            changes=changes,
            message=commit_message,
            author_name=f"User {actor.id}",
            author_email=f"{actor.id}@letta.ai",
        )

    # =========================================================================
    # History Operations
    # =========================================================================

    @enforce_types
    @trace_method
    async def get_history_async(
        self,
        agent_id: str,
        actor: PydanticUser,
        path: Optional[str] = None,
        limit: int = 50,
    ) -> List[MemoryCommit]:
        """Get commit history.

        Args:
            agent_id: Agent ID
            actor: User performing the operation
            path: Optional file path to filter by
            limit: Maximum commits to return

        Returns:
            List of commits, newest first (empty list if the repo does not exist)
        """
        try:
            return await self.git.get_history(
                agent_id=agent_id,
                org_id=actor.organization_id,
                path=path,
                limit=limit,
            )
        except FileNotFoundError:
            return []
# --- letta/services/memory_repo/storage/base.py (new file in this diff) ---

"""Abstract base class for storage backends."""

from abc import ABC, abstractmethod
from typing import List


class StorageBackend(ABC):
    """Abstract storage backend for memory repositories.

    Provides a unified interface for storing git repository objects
    in various object storage systems (GCS, S3, local filesystem).
    """

    @property
    @abstractmethod
    def bucket_name(self) -> str:
        """Return the bucket/container name."""
        pass

    @abstractmethod
    async def upload_bytes(self, path: str, content: bytes) -> None:
        """Upload bytes to the given path.

        Args:
            path: Path within the bucket (e.g., "org-123/agent-456/objects/pack/pack-abc.pack")
            content: Raw bytes to upload
        """
        pass

    @abstractmethod
    async def download_bytes(self, path: str) -> bytes:
        """Download bytes from the given path.

        Args:
            path: Path within the bucket

        Returns:
            Raw bytes content

        Raises:
            FileNotFoundError: If the path doesn't exist
        """
        pass

    @abstractmethod
    async def exists(self, path: str) -> bool:
        """Check if a path exists.

        Args:
            path: Path within the bucket

        Returns:
            True if the path exists
        """
        pass

    @abstractmethod
    async def delete(self, path: str) -> None:
        """Delete a file at the given path.

        Args:
            path: Path within the bucket

        Raises:
            FileNotFoundError: If the path doesn't exist
        """
        pass

    @abstractmethod
    async def list_files(self, prefix: str) -> List[str]:
        """List all files with the given prefix.

        Args:
            prefix: Path prefix to filter by

        Returns:
            List of full paths matching the prefix
        """
        pass

    @abstractmethod
    async def delete_prefix(self, prefix: str) -> int:
        """Delete all files with the given prefix.

        Args:
            prefix: Path prefix to delete

        Returns:
            Number of files deleted
        """
        pass

    # ---- Concrete convenience helpers built on the abstract primitives ----

    async def upload_text(self, path: str, content: str, encoding: str = "utf-8") -> None:
        """Upload text content to the given path.

        Args:
            path: Path within the bucket
            content: Text content to upload
            encoding: Text encoding (default: utf-8)
        """
        await self.upload_bytes(path, content.encode(encoding))

    async def download_text(self, path: str, encoding: str = "utf-8") -> str:
        """Download text content from the given path.

        Args:
            path: Path within the bucket
            encoding: Text encoding (default: utf-8)

        Returns:
            Text content
        """
        content = await self.download_bytes(path)
        return content.decode(encoding)

    async def copy(self, source_path: str, dest_path: str) -> None:
        """Copy a file from source to destination.

        Default implementation downloads and re-uploads (the whole object
        passes through memory). Subclasses may override with more efficient
        server-side implementations.

        Args:
            source_path: Source path
            dest_path: Destination path
        """
        content = await self.download_bytes(source_path)
        await self.upload_bytes(dest_path, content)
+ + Args: + base_path: Base directory for storage (default: ~/.letta/memfs) + prefix: Prefix for all paths in this backend (default: "repository") + """ + if base_path is None: + base_path = os.path.expanduser("~/.letta/memfs") + + self._base_path = Path(base_path) + self._prefix = prefix.rstrip("/") + self._bucket_name = "local" # For interface compatibility + + # Ensure base directory exists + self._base_path.mkdir(parents=True, exist_ok=True) + logger.debug(f"LocalStorageBackend initialized at {self._base_path}") + + def _full_path(self, path: str) -> Path: + """Get full filesystem path including prefix.""" + path = path.lstrip("/") + if self._prefix: + return self._base_path / self._prefix / path + return self._base_path / path + + @property + def bucket_name(self) -> str: + """Return the bucket name (for interface compatibility).""" + return self._bucket_name + + async def upload_bytes(self, path: str, content: bytes) -> None: + """Write bytes to a local file.""" + full_path = self._full_path(path) + full_path.parent.mkdir(parents=True, exist_ok=True) + + with open(full_path, "wb") as f: + f.write(content) + + logger.debug(f"Wrote {len(content)} bytes to {full_path}") + + async def download_bytes(self, path: str) -> bytes: + """Read bytes from a local file.""" + full_path = self._full_path(path) + + if not full_path.exists(): + raise FileNotFoundError(f"{full_path} not found") + + with open(full_path, "rb") as f: + return f.read() + + async def exists(self, path: str) -> bool: + """Check if a path exists.""" + full_path = self._full_path(path) + return full_path.exists() + + async def delete(self, path: str) -> None: + """Delete a file.""" + full_path = self._full_path(path) + + if not full_path.exists(): + raise FileNotFoundError(f"{full_path} not found") + + full_path.unlink() + logger.debug(f"Deleted {full_path}") + + async def list_files(self, prefix: str) -> List[str]: + """List all files with the given prefix.""" + full_prefix = self._full_path(prefix) + 
+ if not full_prefix.exists(): + return [] + + result = [] + if full_prefix.is_file(): + # Prefix is a file, return it + rel_path = str(full_prefix.relative_to(self._base_path / self._prefix)) + result.append(rel_path) + else: + # Walk directory + for file_path in full_prefix.rglob("*"): + if file_path.is_file(): + rel_path = str(file_path.relative_to(self._base_path / self._prefix)) + result.append(rel_path) + + return result + + async def delete_prefix(self, prefix: str) -> int: + """Delete all files with the given prefix.""" + full_prefix = self._full_path(prefix) + + if not full_prefix.exists(): + return 0 + + # Count files before deletion + count = sum(1 for _ in full_prefix.rglob("*") if _.is_file()) + + if full_prefix.is_file(): + full_prefix.unlink() + count = 1 + else: + shutil.rmtree(full_prefix, ignore_errors=True) + + logger.debug(f"Deleted {count} files with prefix {prefix}") + return count + + async def copy(self, source_path: str, dest_path: str) -> None: + """Copy a file.""" + source_full = self._full_path(source_path) + dest_full = self._full_path(dest_path) + + if not source_full.exists(): + raise FileNotFoundError(f"{source_full} not found") + + dest_full.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(source_full, dest_full) + logger.debug(f"Copied {source_full} to {dest_full}") diff --git a/letta/services/message_manager.py b/letta/services/message_manager.py index 1da5ea8a..e4a6ca0c 100644 --- a/letta/services/message_manager.py +++ b/letta/services/message_manager.py @@ -13,7 +13,7 @@ from letta.orm.message import Message as MessageModel from letta.otel.tracing import trace_method from letta.schemas.enums import MessageRole, PrimitiveType from letta.schemas.letta_message import LettaMessageUpdateUnion -from letta.schemas.letta_message_content import ImageSourceType, LettaImage, MessageContentType, TextContent +from letta.schemas.letta_message_content import ImageSourceType, LettaImage, MessageContentType from letta.schemas.message 
import Message as PydanticMessage, MessageSearchResult, MessageUpdate from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry diff --git a/letta/services/passage_manager.py b/letta/services/passage_manager.py index 0a69e70e..171dffea 100644 --- a/letta/services/passage_manager.py +++ b/letta/services/passage_manager.py @@ -1,6 +1,9 @@ import uuid from datetime import datetime, timezone -from typing import Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional + +if TYPE_CHECKING: + from letta.orm.sqlalchemy_base import SqlalchemyBase from openai import AsyncOpenAI from sqlalchemy import func, select @@ -23,7 +26,6 @@ from letta.schemas.passage import Passage as PydanticPassage from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.services.archive_manager import ArchiveManager -from letta.settings import settings from letta.utils import enforce_types logger = get_logger(__name__) @@ -351,7 +353,7 @@ class PassageManager: return passage.to_pydantic() @trace_method - def _preprocess_passage_for_creation(self, pydantic_passage: PydanticPassage) -> "SqlAlchemyBase": + def _preprocess_passage_for_creation(self, pydantic_passage: PydanticPassage) -> "SqlalchemyBase": data = pydantic_passage.model_dump(to_orm=True) common_fields = { "id": data.get("id"), @@ -365,13 +367,13 @@ class PassageManager: "created_at": data.get("created_at", datetime.now(timezone.utc)), } - if "archive_id" in data and data["archive_id"]: + if data.get("archive_id"): assert not data.get("source_id"), "Passage cannot have both archive_id and source_id" agent_fields = { "archive_id": data["archive_id"], } passage = ArchivalPassage(**common_fields, **agent_fields) - elif "source_id" in data and data["source_id"]: + elif data.get("source_id"): assert not data.get("archive_id"), "Passage cannot have both archive_id and source_id" source_fields = { "source_id": data["source_id"], @@ -496,7 +498,6 @@ 
class PassageManager: @trace_method def create_many_passages(self, passages: List[PydanticPassage], actor: PydanticUser) -> List[PydanticPassage]: """DEPRECATED: Use create_many_agent_passages() or create_many_source_passages() instead.""" - import warnings logger.warning( "create_many_passages is deprecated. Use create_many_agent_passages() or create_many_source_passages() instead.", @@ -508,7 +509,6 @@ class PassageManager: @trace_method async def create_many_passages_async(self, passages: List[PydanticPassage], actor: PydanticUser) -> List[PydanticPassage]: """DEPRECATED: Use create_many_agent_passages_async() or create_many_source_passages_async() instead.""" - import warnings logger.warning( "create_many_passages_async is deprecated. Use create_many_agent_passages_async() or create_many_source_passages_async() instead.", @@ -696,7 +696,7 @@ class PassageManager: setattr(curr_passage, "tags", new_tags) # Pad embeddings if needed (only when using Postgres as vector DB) - if "embedding" in update_data and update_data["embedding"]: + if update_data.get("embedding"): import numpy as np from letta.helpers.tpuf_client import should_use_tpuf @@ -741,7 +741,7 @@ class PassageManager: update_data = passage.model_dump(to_orm=True, exclude_unset=True, exclude_none=True) # Pad embeddings if needed (only when using Postgres as vector DB) - if "embedding" in update_data and update_data["embedding"]: + if update_data.get("embedding"): import numpy as np from letta.helpers.tpuf_client import should_use_tpuf @@ -814,7 +814,6 @@ class PassageManager: @trace_method async def delete_passage_by_id_async(self, passage_id: str, actor: PydanticUser) -> bool: """DEPRECATED: Use delete_agent_passage_by_id_async() or delete_source_passage_by_id_async() instead.""" - import warnings logger.warning( "delete_passage_by_id_async is deprecated. 
Use delete_agent_passage_by_id_async() or delete_source_passage_by_id_async() instead.", @@ -927,7 +926,6 @@ class PassageManager: passages: List[PydanticPassage], ) -> bool: """DEPRECATED: Use delete_agent_passages() or delete_source_passages() instead.""" - import warnings logger.warning( "delete_passages is deprecated. Use delete_agent_passages() or delete_source_passages() instead.", @@ -948,7 +946,6 @@ class PassageManager: agent_id: Optional[str] = None, ) -> int: """DEPRECATED: Use agent_passage_size() instead (this only counted agent passages anyway).""" - import warnings logger.warning("size is deprecated. Use agent_passage_size() instead.", stacklevel=2) return self.agent_passage_size(actor=actor, agent_id=agent_id) diff --git a/letta/services/per_agent_lock_manager.py b/letta/services/per_agent_lock_manager.py deleted file mode 100644 index aff76a1f..00000000 --- a/letta/services/per_agent_lock_manager.py +++ /dev/null @@ -1,22 +0,0 @@ -import threading -from collections import defaultdict - -from letta.otel.tracing import trace_method - - -class PerAgentLockManager: - """Manages per-agent locks.""" - - def __init__(self): - self.locks = defaultdict(threading.Lock) - - @trace_method - def get_lock(self, agent_id: str) -> threading.Lock: - """Retrieve the lock for a specific agent_id.""" - return self.locks[agent_id] - - @trace_method - def clear_lock(self, agent_id: str): - """Optionally remove a lock if no longer needed (to prevent unbounded growth).""" - if agent_id in self.locks: - del self.locks[agent_id] diff --git a/letta/services/provider_manager.py b/letta/services/provider_manager.py index 1556ecef..d1c98dc5 100644 --- a/letta/services/provider_manager.py +++ b/letta/services/provider_manager.py @@ -3,7 +3,6 @@ from typing import List, Optional, Tuple, Union from sqlalchemy import and_, select from letta.log import get_logger -from letta.orm.errors import UniqueConstraintViolationError from letta.orm.provider import Provider as ProviderModel 
from letta.orm.provider_model import ProviderModel as ProviderModelORM from letta.otel.tracing import trace_method @@ -410,7 +409,7 @@ class ProviderManager: try: provider_model = await ProviderModel.read_async(db_session=session, identifier=provider_id, actor=actor) return provider_model.to_pydantic() - except: + except Exception: # If not found, try to get as global provider (organization_id=NULL) from sqlalchemy import select @@ -675,7 +674,7 @@ class ProviderManager: for llm_config in llm_models: logger.info(f" Checking LLM model: {llm_config.handle} (name: {llm_config.model})") - # Check if model already exists (excluding soft-deleted ones) + # Check if model already exists by handle (excluding soft-deleted ones) existing = await ProviderModelORM.list_async( db_session=session, limit=1, @@ -687,6 +686,19 @@ class ProviderManager: }, ) + # Also check by name+provider_id (covers unique_model_per_provider_and_type constraint) + if not existing: + existing = await ProviderModelORM.list_async( + db_session=session, + limit=1, + check_is_deleted=True, + **{ + "name": llm_config.model, + "provider_id": provider.id, + "model_type": "llm", + }, + ) + if not existing: logger.info(f" Creating new LLM model {llm_config.handle}") # Create new model entry @@ -710,19 +722,12 @@ class ProviderManager: f"org_id={pydantic_model.organization_id}" ) - # Convert to ORM model = ProviderModelORM(**pydantic_model.model_dump(to_orm=True)) - try: - await model.create_async(session) - logger.info(f" ✓ Successfully created LLM model {llm_config.handle} with ID {model.id}") - except Exception as e: - logger.info(f" ✗ Failed to create LLM model {llm_config.handle}: {e}") - # Log the full error details - import traceback - - logger.info(f" Full traceback: {traceback.format_exc()}") - # Roll back the session to clear the failed transaction - await session.rollback() + result = await model.create_async(session, ignore_conflicts=True) + if result: + logger.info(f" ✓ Successfully created LLM 
model {llm_config.handle}") + else: + logger.info(f" LLM model {llm_config.handle} already exists (concurrent insert), skipping") else: # Check if max_context_window or model_endpoint_type needs to be updated existing_model = existing[0] @@ -754,7 +759,7 @@ class ProviderManager: for embedding_config in embedding_models: logger.info(f" Checking embedding model: {embedding_config.handle} (name: {embedding_config.embedding_model})") - # Check if model already exists (excluding soft-deleted ones) + # Check if model already exists by handle (excluding soft-deleted ones) existing = await ProviderModelORM.list_async( db_session=session, limit=1, @@ -766,6 +771,19 @@ class ProviderManager: }, ) + # Also check by name+provider_id (covers unique_model_per_provider_and_type constraint) + if not existing: + existing = await ProviderModelORM.list_async( + db_session=session, + limit=1, + check_is_deleted=True, + **{ + "name": embedding_config.embedding_model, + "provider_id": provider.id, + "model_type": "embedding", + }, + ) + if not existing: logger.info(f" Creating new embedding model {embedding_config.handle}") # Create new model entry @@ -787,19 +805,12 @@ class ProviderManager: f"org_id={pydantic_model.organization_id}" ) - # Convert to ORM model = ProviderModelORM(**pydantic_model.model_dump(to_orm=True)) - try: - await model.create_async(session) - logger.info(f" ✓ Successfully created embedding model {embedding_config.handle} with ID {model.id}") - except Exception as e: - logger.error(f" ✗ Failed to create embedding model {embedding_config.handle}: {e}") - # Log the full error details - import traceback - - logger.error(f" Full traceback: {traceback.format_exc()}") - # Roll back the session to clear the failed transaction - await session.rollback() + result = await model.create_async(session, ignore_conflicts=True) + if result: + logger.info(f" ✓ Successfully created embedding model {embedding_config.handle}") + else: + logger.info(f" Embedding model 
{embedding_config.handle} already exists (concurrent insert), skipping") else: # Check if model_endpoint_type needs to be updated existing_model = existing[0] @@ -979,10 +990,13 @@ class ProviderManager: # Get the default max_output_tokens from the provider (provider-specific logic) max_tokens = typed_provider.get_default_max_output_tokens(model.name) - # Determine the model endpoint - use provider's base_url if set, - # otherwise use provider-specific defaults + # Determine the model endpoint - use provider's OpenAI-compatible base_url if available, + # otherwise fall back to raw base_url or provider-specific defaults - if typed_provider.base_url: + if hasattr(typed_provider, "openai_compat_base_url"): + # For providers like ollama/vllm/lmstudio that need /v1 appended for OpenAI compatibility + model_endpoint = typed_provider.openai_compat_base_url + elif typed_provider.base_url: model_endpoint = typed_provider.base_url elif provider.provider_type == ProviderType.chatgpt_oauth: # ChatGPT OAuth uses the ChatGPT backend API, not a generic endpoint pattern @@ -1034,7 +1048,7 @@ class ProviderManager: # Model not in DB - check if it's from a BYOK provider # Handle format is "provider_name/model_name" if "/" in handle: - provider_name, model_name = handle.split("/", 1) + provider_name, _model_name = handle.split("/", 1) byok_providers = await self.list_providers_async( actor=actor, name=provider_name, diff --git a/letta/services/provider_trace_backends/clickhouse.py b/letta/services/provider_trace_backends/clickhouse.py index 1c5731f7..3ba84772 100644 --- a/letta/services/provider_trace_backends/clickhouse.py +++ b/letta/services/provider_trace_backends/clickhouse.py @@ -1,18 +1,27 @@ -"""ClickHouse provider trace backend.""" +"""ClickHouse provider trace backend. +Writes and reads from the llm_traces table with denormalized columns for cost analytics. 
+""" + +import json +import uuid +from typing import TYPE_CHECKING, Optional + +from letta.log import get_logger from letta.schemas.provider_trace import ProviderTrace from letta.schemas.user import User from letta.services.clickhouse_provider_traces import ClickhouseProviderTraceReader from letta.services.provider_trace_backends.base import ProviderTraceBackendClient +from letta.settings import settings + +if TYPE_CHECKING: + from letta.schemas.llm_trace import LLMTrace + +logger = get_logger(__name__) class ClickhouseProviderTraceBackend(ProviderTraceBackendClient): - """ - Store provider traces in ClickHouse. - - Writes flow through OTEL instrumentation, so create_async is a no-op. - Only reads are performed directly against ClickHouse. - """ + """ClickHouse backend for provider traces (reads and writes from llm_traces table).""" def __init__(self): self._reader = ClickhouseProviderTraceReader() @@ -21,9 +30,28 @@ class ClickhouseProviderTraceBackend(ProviderTraceBackendClient): self, actor: User, provider_trace: ProviderTrace, - ) -> ProviderTrace: - # ClickHouse writes flow through OTEL instrumentation, not direct writes. - # Return a ProviderTrace with the same ID for consistency across backends. 
+ ) -> ProviderTrace | None: + """Write provider trace to ClickHouse llm_traces table.""" + if not settings.store_llm_traces: + # Return minimal trace for consistency if writes disabled + return ProviderTrace( + id=provider_trace.id, + step_id=provider_trace.step_id, + request_json=provider_trace.request_json or {}, + response_json=provider_trace.response_json or {}, + ) + + try: + from letta.services.llm_trace_writer import get_llm_trace_writer + + trace = self._convert_to_trace(actor, provider_trace) + if trace: + writer = get_llm_trace_writer() + await writer.write_async(trace) + + except Exception as e: + logger.debug(f"Failed to write trace to ClickHouse: {e}") + return ProviderTrace( id=provider_trace.id, step_id=provider_trace.step_id, @@ -36,7 +64,124 @@ class ClickhouseProviderTraceBackend(ProviderTraceBackendClient): step_id: str, actor: User, ) -> ProviderTrace | None: + """Read provider trace from llm_traces table by step_id.""" return await self._reader.get_provider_trace_by_step_id_async( step_id=step_id, organization_id=actor.organization_id, ) + + def _convert_to_trace( + self, + actor: User, + provider_trace: ProviderTrace, + ) -> Optional["LLMTrace"]: + """Convert ProviderTrace to LLMTrace for analytics storage.""" + from letta.schemas.llm_trace import LLMTrace + + # Serialize JSON fields + request_json_str = json.dumps(provider_trace.request_json, default=str) + response_json_str = json.dumps(provider_trace.response_json, default=str) + llm_config_json_str = json.dumps(provider_trace.llm_config, default=str) if provider_trace.llm_config else "{}" + + # Extract provider and model from llm_config + llm_config = provider_trace.llm_config or {} + provider = llm_config.get("model_endpoint_type", "unknown") + model = llm_config.get("model", "unknown") + is_byok = llm_config.get("provider_category") == "byok" + + # Extract usage from response (generic parsing for common formats) + usage = self._extract_usage(provider_trace.response_json, provider) + + # 
Check for error in response - must have actual error content, not just null + # OpenAI Responses API returns {"error": null} on success + error_data = provider_trace.response_json.get("error") + error_type = provider_trace.response_json.get("error_type") + error_message = None + is_error = bool(error_data) or bool(error_type) + if is_error: + if isinstance(error_data, dict): + error_type = error_type or error_data.get("type") + error_message = error_data.get("message", str(error_data))[:1000] + elif error_data: + error_message = str(error_data)[:1000] + + # Extract UUID from provider_trace.id (strip "provider_trace-" prefix) + trace_id = provider_trace.id + if not trace_id: + logger.warning("ProviderTrace missing id - trace correlation across backends will fail") + trace_id = str(uuid.uuid4()) + elif trace_id.startswith("provider_trace-"): + trace_id = trace_id[len("provider_trace-") :] + + return LLMTrace( + id=trace_id, + organization_id=provider_trace.org_id or actor.organization_id, + project_id=None, + agent_id=provider_trace.agent_id, + agent_tags=provider_trace.agent_tags or [], + run_id=provider_trace.run_id, + step_id=provider_trace.step_id, + trace_id=None, + call_type=provider_trace.call_type or "unknown", + provider=provider, + model=model, + is_byok=is_byok, + request_size_bytes=len(request_json_str.encode("utf-8")), + response_size_bytes=len(response_json_str.encode("utf-8")), + prompt_tokens=usage.get("prompt_tokens", 0), + completion_tokens=usage.get("completion_tokens", 0), + total_tokens=usage.get("total_tokens", 0), + cached_input_tokens=usage.get("cached_input_tokens"), + cache_write_tokens=usage.get("cache_write_tokens"), + reasoning_tokens=usage.get("reasoning_tokens"), + latency_ms=0, # Not available in ProviderTrace + is_error=is_error, + error_type=error_type, + error_message=error_message, + request_json=request_json_str, + response_json=response_json_str, + llm_config_json=llm_config_json_str, + ) + + def _extract_usage(self, 
response_json: dict, provider: str) -> dict: + """Extract usage statistics from response JSON. + + Handles common formats from OpenAI, Anthropic, and other providers. + """ + usage = {} + + # OpenAI format: response.usage + if "usage" in response_json: + u = response_json["usage"] + usage["prompt_tokens"] = u.get("prompt_tokens", 0) + usage["completion_tokens"] = u.get("completion_tokens", 0) + usage["total_tokens"] = u.get("total_tokens", 0) + + # OpenAI reasoning tokens + if "completion_tokens_details" in u: + details = u["completion_tokens_details"] + usage["reasoning_tokens"] = details.get("reasoning_tokens") + + # OpenAI cached tokens + if "prompt_tokens_details" in u: + details = u["prompt_tokens_details"] + usage["cached_input_tokens"] = details.get("cached_tokens") + + # Anthropic format: response.usage with cache fields + if provider == "anthropic" and "usage" in response_json: + u = response_json["usage"] + # input_tokens can be 0 when all tokens come from cache + input_tokens = u.get("input_tokens", 0) + cache_read = u.get("cache_read_input_tokens", 0) + cache_write = u.get("cache_creation_input_tokens", 0) + # Total prompt = input + cached (for cost analytics) + usage["prompt_tokens"] = input_tokens + cache_read + cache_write + usage["completion_tokens"] = u.get("output_tokens", usage.get("completion_tokens", 0)) + usage["cached_input_tokens"] = cache_read if cache_read else None + usage["cache_write_tokens"] = cache_write if cache_write else None + + # Recalculate total if not present + if "total_tokens" not in usage or usage["total_tokens"] == 0: + usage["total_tokens"] = usage.get("prompt_tokens", 0) + usage.get("completion_tokens", 0) + + return usage diff --git a/letta/services/provider_trace_backends/socket.py b/letta/services/provider_trace_backends/socket.py index 1d375e57..706eac1c 100644 --- a/letta/services/provider_trace_backends/socket.py +++ b/letta/services/provider_trace_backends/socket.py @@ -4,6 +4,7 @@ import json import os import 
socket as socket_module import threading +import time from datetime import datetime, timezone from typing import Any @@ -18,7 +19,8 @@ logger = get_logger(__name__) # Bump this when making breaking changes to the record schema. # Must match ProtocolVersion in apps/crouton/main.go. # v2: Added user_id, compaction_settings (summarization), llm_config (non-summarization) -PROTOCOL_VERSION = 2 +# v3: Increased buffer to 128MB, native sidecar for deterministic startup +PROTOCOL_VERSION = 3 class SocketProviderTraceBackend(ProviderTraceBackendClient): @@ -106,17 +108,29 @@ class SocketProviderTraceBackend(ProviderTraceBackendClient): thread = threading.Thread(target=self._send_async, args=(record,), daemon=True) thread.start() - def _send_async(self, record: dict[str, Any]) -> None: + def _send_async(self, record: dict[str, Any], max_retries: int = 3) -> None: """Send record to Unix socket (runs in background thread).""" - try: - if not os.path.exists(self.socket_path): - logger.warning(f"Crouton socket not found at {self.socket_path}") - return + base_delay = 0.5 + for attempt in range(max_retries): + try: + if not os.path.exists(self.socket_path): + if attempt < max_retries - 1: + time.sleep(base_delay * (2**attempt)) + continue + logger.warning(f"Crouton socket not found at {self.socket_path} after {max_retries} attempts") + return - with socket_module.socket(socket_module.AF_UNIX, socket_module.SOCK_STREAM) as sock: - sock.settimeout(5.0) - sock.connect(self.socket_path) - payload = json.dumps(record, default=str) + "\n" - sock.sendall(payload.encode()) - except Exception as e: - logger.warning(f"Failed to send telemetry to Crouton: {e}") + with socket_module.socket(socket_module.AF_UNIX, socket_module.SOCK_STREAM) as sock: + sock.settimeout(60.0) # Match crouton's connectionTimeout for large payloads + sock.connect(self.socket_path) + payload = json.dumps(record, default=str) + "\n" + sock.sendall(payload.encode()) + return + except BrokenPipeError: + if attempt < 
max_retries - 1: + time.sleep(base_delay * (2**attempt)) + continue + logger.warning(f"Failed to send telemetry to Crouton: broken pipe after {max_retries} attempts") + except Exception as e: + logger.warning(f"Failed to send telemetry to Crouton: {e}") + return diff --git a/letta/services/run_manager.py b/letta/services/run_manager.py index c841a62d..7283b701 100644 --- a/letta/services/run_manager.py +++ b/letta/services/run_manager.py @@ -1,26 +1,21 @@ from datetime import datetime -from multiprocessing import Value -from pickletools import pyunicode from typing import List, Literal, Optional from httpx import AsyncClient from letta.data_sources.redis_client import get_redis_client -from letta.errors import LettaInvalidArgumentError from letta.helpers.datetime_helpers import get_utc_time from letta.log import get_logger from letta.log_context import update_log_context from letta.orm.agent import Agent as AgentModel from letta.orm.errors import NoResultFound -from letta.orm.message import Message as MessageModel from letta.orm.run import Run as RunModel from letta.orm.run_metrics import RunMetrics as RunMetricsModel from letta.orm.sqlalchemy_base import AccessType -from letta.orm.step import Step as StepModel from letta.otel.tracing import log_event, trace_method from letta.schemas.enums import AgentType, ComparisonOperator, MessageRole, PrimitiveType, RunStatus from letta.schemas.job import LettaRequestConfig -from letta.schemas.letta_message import LettaMessage, LettaMessageUnion +from letta.schemas.letta_message import LettaMessage from letta.schemas.letta_response import LettaResponse from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType from letta.schemas.message import Message as PydanticMessage @@ -162,7 +157,7 @@ class RunManager: ) -> List[PydanticRun]: """List runs with filtering options.""" async with db_registry.async_session() as session: - from sqlalchemy import func, or_, select + from sqlalchemy import func, select # Always 
join with run_metrics to get duration data query = ( @@ -744,7 +739,7 @@ class RunManager: ) # Combine approval response and tool messages - new_messages = approval_response_messages + [tool_message] + new_messages = [*approval_response_messages, tool_message] # Checkpoint the new messages from letta.agents.agent_loop import AgentLoop diff --git a/letta/services/source_manager.py b/letta/services/source_manager.py index 6f1891e7..825353cf 100644 --- a/letta/services/source_manager.py +++ b/letta/services/source_manager.py @@ -1,4 +1,3 @@ -import asyncio from typing import List, Optional, Union from sqlalchemy import and_, exists, select diff --git a/letta/services/step_manager.py b/letta/services/step_manager.py index 92733e75..d89b21f1 100644 --- a/letta/services/step_manager.py +++ b/letta/services/step_manager.py @@ -7,8 +7,12 @@ from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.orm import Session from letta.helpers.singleton import singleton +from letta.log import get_logger + +logger = get_logger(__name__) from letta.orm.errors import NoResultFound from letta.orm.message import Message as MessageModel +from letta.orm.run import Run as RunModel from letta.orm.sqlalchemy_base import AccessType from letta.orm.step import Step as StepModel from letta.orm.step_metrics import StepMetrics as StepMetricsModel @@ -19,6 +23,7 @@ from letta.schemas.message import Message as PydanticMessage from letta.schemas.openai.chat_completion_response import UsageStatistics from letta.schemas.step import Step as PydanticStep from letta.schemas.step_metrics import StepMetrics as PydanticStepMetrics +from letta.schemas.usage import normalize_cache_tokens, normalize_reasoning_tokens from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.server.rest_api.middleware.request_id import get_request_id @@ -107,6 +112,26 @@ class StepManager: error_type: Optional[str] = None, error_data: Optional[Dict] = None, ) -> PydanticStep: + # 
Extract normalized usage fields + cached_input_tokens = None + cache_write_tokens = None + reasoning_tokens = None + prompt_tokens_details = None + completion_tokens_details = None + + if usage.prompt_tokens_details: + prompt_tokens_details = usage.prompt_tokens_details.model_dump() + cached_input, cache_write = normalize_cache_tokens(usage.prompt_tokens_details) + if cached_input > 0: + cached_input_tokens = cached_input + if cache_write > 0: + cache_write_tokens = cache_write + if usage.completion_tokens_details: + completion_tokens_details = usage.completion_tokens_details.model_dump() + reasoning = normalize_reasoning_tokens(usage.completion_tokens_details) + if reasoning > 0: + reasoning_tokens = reasoning + step_data = { "origin": None, "organization_id": actor.organization_id, @@ -115,11 +140,17 @@ class StepManager: "provider_name": provider_name, "provider_category": provider_category, "model": model, + "model_handle": None, "model_endpoint": model_endpoint, "context_window_limit": context_window_limit, "completion_tokens": usage.completion_tokens, "prompt_tokens": usage.prompt_tokens, "total_tokens": usage.total_tokens, + "cached_input_tokens": cached_input_tokens, + "cache_write_tokens": cache_write_tokens, + "reasoning_tokens": reasoning_tokens, + "prompt_tokens_details": prompt_tokens_details, + "completion_tokens_details": completion_tokens_details, "run_id": run_id, "tags": [], "tid": None, @@ -166,6 +197,7 @@ class StepManager: error_type: Optional[str] = None, error_data: Optional[Dict] = None, allow_partial: Optional[bool] = False, + model_handle: Optional[str] = None, ) -> PydanticStep: step_data = { "origin": None, @@ -203,11 +235,15 @@ class StepManager: except NoResultFound: pass + if run_id: + run_exists = await session.get(RunModel, run_id) + if not run_exists: + logger.warning("Step run_id %s references non-existent run, setting to None", run_id) + step_data["run_id"] = None + new_step = StepModel(**step_data) await 
new_step.create_async(session, no_commit=True, no_refresh=True) pydantic_step = new_step.to_pydantic() - # context manager now handles commits - # await session.commit() return pydantic_step @enforce_types @@ -416,8 +452,18 @@ class StepManager: # Persist detailed token breakdowns if available if usage.prompt_tokens_details: step.prompt_tokens_details = usage.prompt_tokens_details.model_dump() + # Extract normalized cache tokens + cached_input, cache_write = normalize_cache_tokens(usage.prompt_tokens_details) + if cached_input > 0: + step.cached_input_tokens = cached_input + if cache_write > 0: + step.cache_write_tokens = cache_write if usage.completion_tokens_details: step.completion_tokens_details = usage.completion_tokens_details.model_dump() + # Extract normalized reasoning tokens + reasoning = normalize_reasoning_tokens(usage.completion_tokens_details) + if reasoning > 0: + step.reasoning_tokens = reasoning # context manager now handles commits # await session.commit() @@ -555,6 +601,12 @@ class StepManager: "base_template_id": base_template_id, } + if run_id: + run_exists = await session.get(RunModel, run_id) + if not run_exists: + logger.warning("StepMetrics run_id %s references non-existent run, setting to None", run_id) + metrics_data["run_id"] = None + metrics = StepMetricsModel(**metrics_data) await metrics.create_async(session) return metrics.to_pydantic() diff --git a/letta/services/streaming_service.py b/letta/services/streaming_service.py index 82057622..496177ef 100644 --- a/letta/services/streaming_service.py +++ b/letta/services/streaming_service.py @@ -45,6 +45,7 @@ from letta.server.rest_api.streaming_response import ( get_cancellation_event_for_run, ) from letta.server.rest_api.utils import capture_sentry_exception +from letta.services.conversation_manager import ConversationManager from letta.services.run_manager import RunManager from letta.settings import settings from letta.utils import safe_create_task @@ -102,6 +103,22 @@ class 
StreamingService: include_relationships=["memory", "multi_agent_group", "sources", "tool_exec_environment_variables", "tools", "tags"], ) + # Apply conversation-level model override if set (lower priority than request override) + if conversation_id and not request.override_model: + conversation = await ConversationManager().get_conversation_by_id( + conversation_id=conversation_id, + actor=actor, + ) + if conversation.model: + conversation_llm_config = await self.server.get_llm_config_from_handle_async( + actor=actor, + handle=conversation.model, + ) + if conversation.model_settings is not None: + update_params = conversation.model_settings._to_legacy_config_params() + conversation_llm_config = conversation_llm_config.model_copy(update=update_params) + agent = agent.model_copy(update={"llm_config": conversation_llm_config}) + # Handle model override if specified in the request if request.override_model: override_llm_config = await self.server.get_llm_config_from_handle_async( @@ -111,8 +128,6 @@ class StreamingService: # Create a copy of agent state with the overridden llm_config agent = agent.model_copy(update={"llm_config": override_llm_config}) - agent_eligible = self._is_agent_eligible(agent) - model_compatible = self._is_model_compatible(agent) model_compatible_token_streaming = self._is_token_streaming_compatible(agent) # Attempt to acquire conversation lock if conversation_id is provided @@ -133,67 +148,39 @@ class StreamingService: run = await self._create_run(agent_id, request, run_type, actor, conversation_id=conversation_id) await redis_client.set(f"{REDIS_RUN_ID_PREFIX}:{agent_id}", run.id if run else None) - if agent_eligible and model_compatible: - # use agent loop for streaming - agent_loop = AgentLoop.load(agent_state=agent, actor=actor) + # use agent loop for streaming + agent_loop = AgentLoop.load(agent_state=agent, actor=actor) - # create the base stream with error handling - raw_stream = self._create_error_aware_stream( - agent_loop=agent_loop, 
- messages=request.messages, - max_steps=request.max_steps, - stream_tokens=request.stream_tokens and model_compatible_token_streaming, - run_id=run.id if run else None, - use_assistant_message=request.use_assistant_message, - request_start_timestamp_ns=request_start_timestamp_ns, - include_return_message_types=request.include_return_message_types, - actor=actor, - conversation_id=conversation_id, - client_tools=request.client_tools, - ) + # create the base stream with error handling + raw_stream = self._create_error_aware_stream( + agent_loop=agent_loop, + messages=request.messages, + max_steps=request.max_steps, + stream_tokens=request.stream_tokens and model_compatible_token_streaming, + run_id=run.id if run else None, + use_assistant_message=request.use_assistant_message, + request_start_timestamp_ns=request_start_timestamp_ns, + include_return_message_types=request.include_return_message_types, + actor=actor, + conversation_id=conversation_id, + client_tools=request.client_tools, + include_compaction_messages=request.include_compaction_messages, + ) - # handle background streaming if requested - if request.background and settings.track_agent_run: - if isinstance(redis_client, NoopAsyncRedisClient): - raise LettaServiceUnavailableError( - f"Background streaming requires Redis to be running. " - f"Please ensure Redis is properly configured. 
" - f"LETTA_REDIS_HOST: {settings.redis_host}, LETTA_REDIS_PORT: {settings.redis_port}", - service_name="redis", - ) - - # Wrap the agent loop stream with cancellation awareness for background task - background_stream = raw_stream - if settings.enable_cancellation_aware_streaming and run: - background_stream = cancellation_aware_stream_wrapper( - stream_generator=raw_stream, - run_manager=self.runs_manager, - run_id=run.id, - actor=actor, - cancellation_event=get_cancellation_event_for_run(run.id), - ) - - safe_create_task( - create_background_stream_processor( - stream_generator=background_stream, - redis_client=redis_client, - run_id=run.id, - run_manager=self.server.run_manager, - actor=actor, - conversation_id=conversation_id, - ), - label=f"background_stream_processor_{run.id}", + # handle background streaming if requested + if request.background and settings.track_agent_run: + if isinstance(redis_client, NoopAsyncRedisClient): + raise LettaServiceUnavailableError( + f"Background streaming requires Redis to be running. " + f"Please ensure Redis is properly configured. 
" + f"LETTA_REDIS_HOST: {settings.redis_host}, LETTA_REDIS_PORT: {settings.redis_port}", + service_name="redis", ) - raw_stream = redis_sse_stream_generator( - redis_client=redis_client, - run_id=run.id, - ) - - # wrap client stream with cancellation awareness if enabled and tracking runs - stream = raw_stream - if settings.enable_cancellation_aware_streaming and settings.track_agent_run and run and not request.background: - stream = cancellation_aware_stream_wrapper( + # Wrap the agent loop stream with cancellation awareness for background task + background_stream = raw_stream + if settings.enable_cancellation_aware_streaming and run: + background_stream = cancellation_aware_stream_wrapper( stream_generator=raw_stream, run_manager=self.runs_manager, run_id=run.id, @@ -201,29 +188,43 @@ class StreamingService: cancellation_event=get_cancellation_event_for_run(run.id), ) - # conditionally wrap with keepalive based on request parameter - if request.include_pings and settings.enable_keepalive: - stream = add_keepalive_to_stream(stream, keepalive_interval=settings.keepalive_interval, run_id=run.id) + safe_create_task( + create_background_stream_processor( + stream_generator=background_stream, + redis_client=redis_client, + run_id=run.id, + run_manager=self.server.run_manager, + actor=actor, + conversation_id=conversation_id, + ), + label=f"background_stream_processor_{run.id}", + ) - result = StreamingResponseWithStatusCode( - stream, - media_type="text/event-stream", + raw_stream = redis_sse_stream_generator( + redis_client=redis_client, + run_id=run.id, ) - else: - # fallback to non-agent-loop streaming - result = await self.server.send_message_to_agent( - agent_id=agent_id, + + # wrap client stream with cancellation awareness if enabled and tracking runs + stream = raw_stream + if settings.enable_cancellation_aware_streaming and settings.track_agent_run and run and not request.background: + stream = cancellation_aware_stream_wrapper( + stream_generator=raw_stream, + 
run_manager=self.runs_manager, + run_id=run.id, actor=actor, - input_messages=request.messages, - stream_steps=True, - stream_tokens=request.stream_tokens, - use_assistant_message=request.use_assistant_message, - assistant_message_tool_name=request.assistant_message_tool_name, - assistant_message_tool_kwarg=request.assistant_message_tool_kwarg, - request_start_timestamp_ns=request_start_timestamp_ns, - include_return_message_types=request.include_return_message_types, + cancellation_event=get_cancellation_event_for_run(run.id), ) + # conditionally wrap with keepalive based on request parameter + if request.include_pings and settings.enable_keepalive: + stream = add_keepalive_to_stream(stream, keepalive_interval=settings.keepalive_interval, run_id=run.id) + + result = StreamingResponseWithStatusCode( + stream, + media_type="text/event-stream", + ) + # update run status to running before returning if settings.track_agent_run and run: # refetch run since it may have been updated by another service @@ -326,6 +327,7 @@ class StreamingService: actor: User, conversation_id: Optional[str] = None, client_tools: Optional[list[ClientToolSchema]] = None, + include_compaction_messages: bool = False, ) -> AsyncIterator: """ Create a stream with unified error handling. 
@@ -337,7 +339,6 @@ class StreamingService: async def error_aware_stream(): """Stream that handles early LLM errors gracefully in streaming format.""" run_status = None - run_update_metadata = None stop_reason = None error_data = None saw_done = False @@ -354,6 +355,7 @@ class StreamingService: include_return_message_types=include_return_message_types, conversation_id=conversation_id, client_tools=client_tools, + include_compaction_messages=include_compaction_messages, ) async for chunk in stream: @@ -455,7 +457,7 @@ class StreamingService: yield f"event: error\ndata: {error_message.model_dump_json()}\n\n" # Send [DONE] marker to properly close the stream yield "data: [DONE]\n\n" - except RunCancelledException as e: + except RunCancelledException: # Run was explicitly cancelled - this is not an error # The cancellation has already been handled by cancellation_aware_stream_wrapper logger.info(f"Run {run_id} was cancelled, exiting stream gracefully") @@ -496,30 +498,6 @@ class StreamingService: return error_aware_stream() - def _is_agent_eligible(self, agent: AgentState) -> bool: - """Check if agent is eligible for streaming.""" - return agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"] - - def _is_model_compatible(self, agent: AgentState) -> bool: - """Check if agent's model is compatible with streaming.""" - return agent.llm_config.model_endpoint_type in [ - "anthropic", - "openai", - "together", - "google_ai", - "google_vertex", - "bedrock", - "ollama", - "azure", - "xai", - "zai", - "groq", - "deepseek", - "chatgpt_oauth", - "minimax", - "openrouter", - ] - def _is_token_streaming_compatible(self, agent: AgentState) -> bool: """Check if agent's model supports token-level streaming.""" base_compatible = agent.llm_config.model_endpoint_type in [ diff --git a/letta/services/summarizer/compact.py b/letta/services/summarizer/compact.py new file mode 100644 index 00000000..6b581628 --- /dev/null +++ 
b/letta/services/summarizer/compact.py @@ -0,0 +1,424 @@ +"""Standalone compaction functions for message summarization.""" + +from dataclasses import dataclass +from typing import List, Optional + +from letta.helpers.message_helper import convert_message_creates_to_messages +from letta.llm_api.llm_client import LLMClient +from letta.log import get_logger +from letta.otel.tracing import trace_method +from letta.schemas.agent import AgentType +from letta.schemas.enums import MessageRole +from letta.schemas.letta_message_content import TextContent +from letta.schemas.llm_config import LLMConfig +from letta.schemas.message import Message, MessageCreate +from letta.schemas.user import User +from letta.services.summarizer.self_summarizer import self_summarize_all, self_summarize_sliding_window +from letta.services.summarizer.summarizer_all import summarize_all +from letta.services.summarizer.summarizer_config import CompactionSettings, get_default_prompt_for_mode, get_default_summarizer_model +from letta.services.summarizer.summarizer_sliding_window import ( + count_tokens, + count_tokens_with_tools, + summarize_via_sliding_window, +) +from letta.services.telemetry_manager import TelemetryManager +from letta.system import package_summarize_message_no_counts + +logger = get_logger(__name__) + + +@dataclass +class CompactResult: + """Result of a compaction operation.""" + + summary_message: Message + compacted_messages: list[Message] + summary_text: str + context_token_estimate: Optional[int] + + +async def build_summarizer_llm_config( + agent_llm_config: LLMConfig, + summarizer_config: CompactionSettings, + actor: User, +) -> LLMConfig: + """Derive an LLMConfig for summarization from a model handle. + + This mirrors the agent-creation path: start from the agent's LLMConfig, + override provider/model/handle from ``compaction_settings.model``, and + then apply any explicit ``compaction_settings.model_settings`` via + ``_to_legacy_config_params``. 
+ + Args: + agent_llm_config: The agent's LLM configuration to use as base. + summarizer_config: Compaction settings with optional model override. + actor: The user performing the operation. + + Returns: + LLMConfig configured for summarization. + """ + from letta.schemas.enums import ProviderType + + # If no summarizer model specified, use lightweight provider-specific defaults + if not summarizer_config.model: + provider_name = agent_llm_config.provider_name or agent_llm_config.model_endpoint_type + try: + provider_type = ProviderType(provider_name) + default_model = get_default_summarizer_model(provider_type=provider_type) + if default_model: + # Use default model + summarizer_config = summarizer_config.model_copy(update={"model": default_model}) + except (ValueError, TypeError): + pass # Unknown provider - will fall back to agent's model below + + # If still no model after defaults, use agent's model + if not summarizer_config.model: + return agent_llm_config + + try: + # Load default config for the summarizer model handle, using the agent's context window + from letta.services.provider_manager import ProviderManager + + provider_manager = ProviderManager() + try: + # automatically sets the context window to the max available for the summarizer model + base = await provider_manager.get_llm_config_from_handle( + handle=summarizer_config.model, + actor=actor, + ) + except Exception as e: + logger.warning( + f"Failed to load LLM config for summarizer handle '{summarizer_config.model}': {e}. Falling back to agent's LLM config." + ) + return agent_llm_config + + # If explicit model_settings are provided for the summarizer, apply + # them just like server.create_agent_async does for agents. 
+ if summarizer_config.model_settings is not None: + update_params = summarizer_config.model_settings._to_legacy_config_params() + return base.model_copy(update=update_params) + + return base + except Exception: + # On any error, do not break the agent – just fall back + return agent_llm_config + + +@trace_method +async def compact_messages( + actor: User, + agent_id: str, + agent_llm_config: LLMConfig, + telemetry_manager: TelemetryManager, + llm_client: LLMClient, + agent_type: AgentType, + messages: List[Message], + timezone: str, + compaction_settings: Optional[CompactionSettings] = None, + agent_tags: Optional[List[str]] = None, + tools: Optional[List[dict]] = None, # Tool json schemas + trigger_threshold: Optional[int] = None, + run_id: Optional[str] = None, + step_id: Optional[str] = None, + use_summary_role: bool = True, + trigger: Optional[str] = None, + context_tokens_before: Optional[int] = None, + messages_count_before: Optional[int] = None, +) -> CompactResult: + """Compact in-context messages using summarization. + + Args: + actor: The user performing the operation. + agent_id: The agent's ID. + agent_llm_config: The agent's LLM configuration. + messages: The in-context messages to compact. + timezone: The agent's timezone for message formatting. + compaction_settings: Optional compaction settings override. + agent_model_handle: The agent's model handle (used if compaction_settings is None). + agent_tags: The agent's tags for telemetry. + tools: The agent's tools (for token counting). + trigger_threshold: If provided, verify context stays below this after compaction. + run_id: Optional run ID for telemetry. + step_id: Optional step ID for telemetry. + use_summary_role: If True, create summary message with role=summary. + trigger: What triggered the compaction (for stats). + context_tokens_before: Token count before compaction (for stats). + messages_count_before: Message count before compaction (for stats). 
+ + Returns: + CompactResult containing the summary message, compacted messages, summary text, + and updated context token estimate. + """ + summarizer_config = compaction_settings if compaction_settings else CompactionSettings() + + # Build the LLMConfig used for summarization + summarizer_llm_config = await build_summarizer_llm_config( + agent_llm_config=agent_llm_config, # used to set default compaction model + summarizer_config=summarizer_config, + actor=actor, + ) + + summarization_mode_used = summarizer_config.mode + if summarizer_config.mode == "self_compact_all": + try: + summary, compacted_messages = await self_summarize_all( + actor=actor, + agent_id=agent_id, + agent_llm_config=agent_llm_config, + telemetry_manager=telemetry_manager, + llm_client=llm_client, + agent_type=agent_type, + messages=messages, + compaction_settings=summarizer_config, + run_id=run_id, + step_id=step_id, + timezone=timezone, + agent_tags=agent_tags, + tools=tools, + ) + except Exception as e: + logger.error(f"Self summarization failed with exception: {str(e)}. Falling back to self sliding window mode.") + try: + fallback_config = summarizer_config.model_copy( + update={ + "mode": "self_compact_sliding_window", + "prompt": get_default_prompt_for_mode("self_compact_sliding_window"), + } + ) + summary, compacted_messages = await self_summarize_sliding_window( + actor=actor, + agent_id=agent_id, + agent_llm_config=agent_llm_config, + telemetry_manager=telemetry_manager, + llm_client=llm_client, + agent_type=agent_type, + messages=messages, + compaction_settings=fallback_config, + run_id=run_id, + step_id=step_id, + timezone=timezone, + agent_tags=agent_tags, + tools=tools, + ) + summarization_mode_used = "self_compact_sliding_window" + except Exception as e: + logger.error(f"Self sliding window summarization failed with exception: {str(e)}. 
Falling back to all mode.") + fallback_config = summarizer_config.model_copy( + update={ + "mode": "all", + "prompt": get_default_prompt_for_mode("all"), + } + ) + summary, compacted_messages = await summarize_all( + actor=actor, + llm_config=summarizer_llm_config, + summarizer_config=fallback_config, + in_context_messages=messages, + agent_id=agent_id, + agent_tags=agent_tags, + run_id=run_id, + step_id=step_id, + ) + summarization_mode_used = "all" + elif summarizer_config.mode == "self_compact_sliding_window": + try: + summary, compacted_messages = await self_summarize_sliding_window( + actor=actor, + agent_id=agent_id, + agent_llm_config=agent_llm_config, + telemetry_manager=telemetry_manager, + llm_client=llm_client, + agent_type=agent_type, + messages=messages, + compaction_settings=summarizer_config, + run_id=run_id, + step_id=step_id, + timezone=timezone, + agent_tags=agent_tags, + tools=tools, + ) + except Exception as e: + # Prompts for all and self mode should be similar --> can use original prompt + logger.error(f"Self sliding window summarization failed with exception: {str(e)}. 
Falling back to all mode.") + fallback_config = summarizer_config.model_copy( + update={ + "mode": "all", + "prompt": get_default_prompt_for_mode("all"), + } + ) + summary, compacted_messages = await summarize_all( + actor=actor, + llm_config=summarizer_llm_config, + summarizer_config=fallback_config, + in_context_messages=messages, + agent_id=agent_id, + agent_tags=agent_tags, + run_id=run_id, + step_id=step_id, + ) + summarization_mode_used = "all" + elif summarizer_config.mode == "all": + summary, compacted_messages = await summarize_all( + actor=actor, + llm_config=summarizer_llm_config, + summarizer_config=summarizer_config, + in_context_messages=messages, + agent_id=agent_id, + agent_tags=agent_tags, + run_id=run_id, + step_id=step_id, + ) + elif summarizer_config.mode == "sliding_window": + try: + summary, compacted_messages = await summarize_via_sliding_window( + actor=actor, + llm_config=summarizer_llm_config, + agent_llm_config=agent_llm_config, + summarizer_config=summarizer_config, + in_context_messages=messages, + agent_id=agent_id, + agent_tags=agent_tags, + run_id=run_id, + step_id=step_id, + ) + except Exception as e: + logger.error(f"Sliding window summarization failed with exception: {str(e)}. 
Falling back to all mode.") + fallback_config = summarizer_config.model_copy( + update={ + "mode": "all", + "prompt": get_default_prompt_for_mode("all"), + } + ) + summary, compacted_messages = await summarize_all( + actor=actor, + llm_config=summarizer_llm_config, + summarizer_config=fallback_config, + in_context_messages=messages, + agent_id=agent_id, + agent_tags=agent_tags, + run_id=run_id, + step_id=step_id, + ) + summarization_mode_used = "all" + else: + raise ValueError(f"Invalid summarizer mode: {summarizer_config.mode}") + + # Update the token count (including tools for accurate comparison with LLM's prompt_tokens) + context_token_estimate = await count_tokens_with_tools( + actor=actor, + llm_config=agent_llm_config, + messages=compacted_messages, + tools=tools or [], + ) + logger.info(f"Context token estimate after summarization: {context_token_estimate}") + + # If the trigger_threshold is provided, verify the new token count is below it + if trigger_threshold is not None and context_token_estimate is not None and context_token_estimate >= trigger_threshold: + logger.error( + "Summarization failed to sufficiently reduce context size: " + f"post-summarization tokens={context_token_estimate}, " + f"threshold={trigger_threshold}. 
" + "Attempting fallback strategies.", + ) + + # If we used the sliding window mode, try to summarize again with the all mode + if summarization_mode_used == "sliding_window": + summary, compacted_messages = await summarize_all( + actor=actor, + llm_config=agent_llm_config, + summarizer_config=summarizer_config, + in_context_messages=compacted_messages, + agent_id=agent_id, + agent_tags=agent_tags, + run_id=run_id, + step_id=step_id, + ) + summarization_mode_used = "all" + + context_token_estimate = await count_tokens_with_tools( + actor=actor, + llm_config=agent_llm_config, + messages=compacted_messages, + tools=tools or [], + ) + + # Final edge case: check if we're still over threshold + if context_token_estimate is not None and context_token_estimate >= trigger_threshold: + # Check if system prompt is the cause + system_prompt_token_estimate = await count_tokens( + actor=actor, + llm_config=agent_llm_config, + messages=[compacted_messages[0]], + ) + if system_prompt_token_estimate is not None and system_prompt_token_estimate >= agent_llm_config.context_window: + from letta.errors import SystemPromptTokenExceededError + + raise SystemPromptTokenExceededError( + system_prompt_token_estimate=system_prompt_token_estimate, + context_window=agent_llm_config.context_window, + ) + + # Log error but don't brick the agent + logger.error(f"Failed to summarize messages after fallback: {context_token_estimate} > {trigger_threshold}") + else: + logger.info(f"Summarization fallback succeeded: {context_token_estimate} < {trigger_threshold}") + + # Build compaction stats if we have the before values + compaction_stats = None + if trigger and context_tokens_before is not None and messages_count_before is not None: + compaction_stats = { + "trigger": trigger, + "context_tokens_before": context_tokens_before, + "context_tokens_after": context_token_estimate, + "context_window": agent_llm_config.context_window, + "messages_count_before": messages_count_before, + 
"messages_count_after": len(compacted_messages) + 1, + } + + # Create the summary message + summary_message_str_packed = package_summarize_message_no_counts( + summary=summary, + timezone=timezone, + compaction_stats=compaction_stats, + mode=summarization_mode_used, + ) + + if use_summary_role: + # New behavior: Create Message directly with role=summary + summary_message_obj = Message( + role=MessageRole.summary, + content=[TextContent(text=summary_message_str_packed)], + agent_id=agent_id, + run_id=run_id, + step_id=step_id, + ) + else: + # Legacy behavior: Use convert_message_creates_to_messages with role=user + summary_messages = await convert_message_creates_to_messages( + message_creates=[ + MessageCreate( + role=MessageRole.user, + content=[TextContent(text=summary_message_str_packed)], + ) + ], + agent_id=agent_id, + timezone=timezone, + wrap_user_message=False, + wrap_system_message=False, + run_id=run_id, + ) + if len(summary_messages) != 1: + logger.error(f"Expected only one summary message, got {len(summary_messages)}") + summary_message_obj = summary_messages[0] + + # Build final messages: [system] + [summary] + remaining compacted messages + final_messages = [compacted_messages[0], summary_message_obj] + if len(compacted_messages) > 1: + final_messages += compacted_messages[1:] + + return CompactResult( + summary_message=summary_message_obj, + compacted_messages=final_messages, + summary_text=summary, + context_token_estimate=context_token_estimate, + ) diff --git a/letta/services/summarizer/self_summarizer.py b/letta/services/summarizer/self_summarizer.py new file mode 100644 index 00000000..505f3e36 --- /dev/null +++ b/letta/services/summarizer/self_summarizer.py @@ -0,0 +1,283 @@ +"""Claude Code-style summarization where agent self-summarizes using its own LLM.""" + +from typing import List, Optional, Tuple + +from letta.llm_api.llm_client import LLMClient +from letta.log import get_logger +from letta.otel.tracing import trace_method +from 
letta.schemas.agent import AgentType +from letta.schemas.enums import MessageRole, ProviderType +from letta.schemas.letta_message_content import TextContent +from letta.schemas.llm_config import LLMConfig +from letta.schemas.message import Message +from letta.schemas.user import User +from letta.services.summarizer.summarizer_config import CompactionSettings, get_default_prompt_for_mode +from letta.services.summarizer.summarizer_sliding_window import count_tokens +from letta.services.telemetry_manager import TelemetryManager + +logger = get_logger(__name__) + + +@trace_method +async def self_summarize_all( + actor: User, + agent_id: str, + agent_llm_config: LLMConfig, + telemetry_manager: TelemetryManager, + llm_client: LLMClient, + agent_type: AgentType, + messages: List[Message], + compaction_settings: CompactionSettings, + timezone: str, + run_id: Optional[str] = None, + step_id: Optional[str] = None, + agent_tags: Optional[List[str]] = None, + # For cache compatibility with regular agent requests + tools: Optional[List[dict]] = None, +) -> Tuple[str, List[Message], str]: + """Summary request is added as a user message, then the agent's LLM is called with the messages + request. + The agent's summary response is parsed and returned. 
+ """ + logger.info(f"Starting self-summarization for {len(messages)} messages") + + # Protect system message and handle last message + if len(messages) < 2: + logger.warning("Too few messages to summarize") + return "No conversation to summarize.", messages + + system_message = messages[0] + + # Cutoff rules for what you can/can't separate + messages_to_summarize, protected_messages = _get_protected_messages(messages) + + # Create the summary request message + if compaction_settings.prompt is None: + compaction_settings.prompt = get_default_prompt_for_mode(compaction_settings.mode) + + logger.info(f"Summarizing {len(messages)} messages with prompt: {compaction_settings.prompt[:100]}...") + summary_request_message = Message( + role=MessageRole.user, + content=[TextContent(text=compaction_settings.prompt)], + agent_id=agent_id, + ) + + # If the last message is not an assistant message, add a dummy assistant message to prevent LLM from continuing the conversation + if messages_to_summarize[-1].role != MessageRole.assistant: + messages_with_request = [ + *messages_to_summarize, + Message(role=MessageRole.assistant, content=[TextContent(text="I understand. 
Let me summarize.")], agent_id=agent_id), + summary_request_message, + ] + logger.info( + f"Calling agent's LLM for self-summarization with {len(messages_with_request)} messages ({len(messages_to_summarize)} in-context + 1 dummy assistant message + 1 summary request)" + ) + else: + # Last message is already assistant, safe to append user directly + messages_with_request = [*messages_to_summarize, summary_request_message] + logger.info( + f"Calling agent's LLM for self-summarization with {len(messages_with_request)} messages ({len(messages_to_summarize)} in-context + 1 summary request)" + ) + + # Set telemetry context + llm_client.set_telemetry_context( + telemetry_manager=telemetry_manager, + agent_id=agent_id, + agent_tags=agent_tags, + run_id=run_id, + step_id=step_id, + call_type="summarization", + org_id=actor.organization_id if actor.organization_id else None, + user_id=actor.id if actor.id else None, + compaction_settings=compaction_settings.model_dump() if compaction_settings else None, + actor=actor, + ) + + # Build request data using agent's llm_client + # Match params used by agent_v3 for cache compatibility + request_data = llm_client.build_request_data( + agent_type, + messages_with_request, + agent_llm_config, + tools=tools, + force_tool_call=None, # Don't force tool calls during summarization + requires_subsequent_tool_call=False, + # tool_return_truncation_chars=TOOL_RETURN_TRUNCATION_CHARS, + ) + + # Match parallel_tool_calls setting from agent's llm_config for cache compatibility + # This mirrors the logic in letta_agent_v3.py step processing + if agent_llm_config.model_endpoint_type in [ProviderType.anthropic, ProviderType.bedrock]: + if isinstance(request_data.get("tool_choice"), dict) and "disable_parallel_tool_use" in request_data["tool_choice"]: + if agent_llm_config.parallel_tool_calls: + request_data["tool_choice"]["disable_parallel_tool_use"] = False + else: + request_data["tool_choice"]["disable_parallel_tool_use"] = True + + # Call LLM by 
sending a message + from letta.services.summarizer.summarizer import _run_summarizer_request + + try: + summary_text = await _run_summarizer_request(request_data, messages_with_request, agent_llm_config, llm_client) + except Exception as e: + logger.error(f"Self-summarization request failed: {e}") + + # handle LLM error (likely a context window exceeded error) + try: + raise llm_client.handle_llm_error(e, llm_config=agent_llm_config) + except Exception as e: + logger.error(f"Self-summarization request failed: {e}") + raise e + + # Clip if needed + if compaction_settings.clip_chars is not None and len(summary_text) > compaction_settings.clip_chars: + logger.warning(f"CC summary length {len(summary_text)} exceeds clip length {compaction_settings.clip_chars}. Truncating.") + summary_text = summary_text[: compaction_settings.clip_chars] + "... [summary truncated to fit]" + + # Build final messages: [system] + protected messages + # Summary message handling is done in compact parent function + final_messages = [system_message] + if protected_messages: + final_messages += protected_messages + + logger.info( + f"Self-summarization complete. Summary length: {len(summary_text)} chars. Keeping {len(protected_messages)} protected messages." + ) + + return summary_text, final_messages + + +@trace_method +async def self_summarize_sliding_window( + actor: User, + agent_id: str, + agent_llm_config: LLMConfig, + telemetry_manager: TelemetryManager, + llm_client: LLMClient, + agent_type: AgentType, + messages: List[Message], + compaction_settings: CompactionSettings, + timezone: str, + run_id: Optional[str] = None, + step_id: Optional[str] = None, + agent_tags: Optional[List[str]] = None, + # For cache compatibility with regular agent requests + tools: Optional[List[dict]] = None, +) -> Tuple[Message, List[Message], str]: + """Summary request is added as a user message, then the agent's LLM is called with the messages + request. 
+ The agent's summary response is parsed and returned. + """ + logger.info("Starting self-summarization with sliding window mode") + # Protect system message and handle last message + if len(messages) < 2: + logger.warning("Too few messages to summarize") + return "No conversation to summarize.", messages + + system_prompt = messages[0] + + # cannot evict a pending approval request (will cause client-side errors) + total_message_count = len(messages) + if messages[-1].role == MessageRole.approval: + maximum_message_index = total_message_count - 2 + else: + maximum_message_index = total_message_count - 1 + + eviction_percentage = compaction_settings.sliding_window_percentage + assert compaction_settings.sliding_window_percentage <= 1.0, "Sliding window percentage must be less than or equal to 1.0" + assistant_message_index = None + + goal_tokens = (1 - compaction_settings.sliding_window_percentage) * agent_llm_config.context_window + approx_token_count = agent_llm_config.context_window + + # allow approvals to be cutoffs (for headless agents) but ensure proper grouping with tool calls + def is_valid_cutoff(message: Message): + if message.role == MessageRole.assistant: + return True + if message.role == MessageRole.approval: + return message.tool_calls is not None and len(message.tool_calls) > 0 + return False + + post_summarization_buffer = [system_prompt] + while approx_token_count >= goal_tokens and eviction_percentage < 1.0: + # more eviction percentage + eviction_percentage += 0.10 + + # calculate message_cutoff_index + message_cutoff_index = round(eviction_percentage * total_message_count) + + # get index of first assistant message after the cutoff point () + assistant_message_index = next( + (i for i in reversed(range(1, message_cutoff_index + 1)) if i < len(messages) and is_valid_cutoff(messages[i])), + None, + ) + if assistant_message_index is None: + logger.warning( + f"No assistant/approval message found for evicting up to index {message_cutoff_index}, 
incrementing eviction percentage" + ) + continue + + # update token count + logger.info(f"Attempting to compact messages to index {assistant_message_index} messages") + post_summarization_buffer = [system_prompt, *messages[assistant_message_index:]] + approx_token_count = await count_tokens(actor, agent_llm_config, post_summarization_buffer) + logger.info( + f"Compacting messages index 1:{assistant_message_index} messages resulted in {approx_token_count} tokens, goal is {goal_tokens}" + ) + + if assistant_message_index is None or eviction_percentage >= 1.0: + raise ValueError("No assistant message found for sliding window summarization") # fall back to complete summarization + + if assistant_message_index >= maximum_message_index: + # need to keep the last message (might contain an approval request) + raise ValueError(f"Assistant message index {assistant_message_index} is at the end of the message buffer, skipping summarization") + + messages_to_summarize = messages[:assistant_message_index] + logger.info( + f"Summarizing {len(messages_to_summarize)} messages with self summarization sliding window, from index 1 to {assistant_message_index} (out of {total_message_count})" + ) + + # pass in messages_to_summarize instead of messages + summary_text, final_messages = await self_summarize_all( + actor=actor, + agent_id=agent_id, + agent_llm_config=agent_llm_config, + telemetry_manager=telemetry_manager, + llm_client=llm_client, + agent_type=agent_type, + messages=messages_to_summarize, + compaction_settings=compaction_settings, + timezone=timezone, + run_id=run_id, + step_id=step_id, + agent_tags=agent_tags, + tools=tools, + ) + + # final_messages should just be the system prompt + return summary_text, final_messages + post_summarization_buffer + + +def _get_protected_messages(in_context_messages: List[Message]) -> Tuple[List[Message], List[Message]]: + """Determine which messages to keep in context window.""" + if in_context_messages[-1].role == MessageRole.approval: + 
# cannot evict a pending approval request (will cause client-side errors) + # Also protect the assistant message before it if they share the same step_id + # (both are part of the same LLM response - assistant has thinking/tool_calls, approval has approval-required subset) + protected_messages = [in_context_messages[-1]] + + # Check if the message before approval is also from the same step (has reasoning/tool_calls) + if len(in_context_messages) >= 2: + potential_assistant = in_context_messages[-2] + approval_request = in_context_messages[-1] + if potential_assistant.role == MessageRole.assistant and potential_assistant.step_id == approval_request.step_id: + # They're part of the same LLM response - protect both + protected_messages = [potential_assistant, approval_request] + messages_to_summarize = in_context_messages[:-2] + else: + messages_to_summarize = in_context_messages[:-1] + else: + messages_to_summarize = in_context_messages[:-1] + else: + messages_to_summarize = in_context_messages + protected_messages = [] + + return messages_to_summarize, protected_messages diff --git a/letta/services/summarizer/summarizer.py b/letta/services/summarizer/summarizer.py index 64e9f8ba..4408e628 100644 --- a/letta/services/summarizer/summarizer.py +++ b/letta/services/summarizer/summarizer.py @@ -1,7 +1,9 @@ -import asyncio import json -import traceback -from typing import List, Optional, Tuple, Union +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +if TYPE_CHECKING: + from letta.agents.voice_sleeptime_agent import VoiceSleeptimeAgent + from letta.services.telemetry_manager import TelemetryManager from letta.agents.ephemeral_summary_agent import EphemeralSummaryAgent from letta.constants import ( @@ -16,8 +18,8 @@ from letta.llm_api.llm_client import LLMClient from letta.log import get_logger from letta.otel.tracing import trace_method from letta.prompts import gpt_summarize -from letta.schemas.enums import AgentType, MessageRole, ProviderType -from 
letta.schemas.letta_message_content import TextContent +from letta.schemas.enums import AgentType, LLMCallType, MessageRole, ProviderType +from letta.schemas.letta_message_content import ImageContent, TextContent from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message, MessageCreate from letta.schemas.user import User @@ -237,7 +239,7 @@ class Summarizer: ) updated_in_context_messages = all_in_context_messages[assistant_message_index:] - return [all_in_context_messages[0], summary_message_obj] + updated_in_context_messages, True + return [all_in_context_messages[0], summary_message_obj, *updated_in_context_messages], True def _static_buffer_summarization( self, @@ -338,7 +340,7 @@ class Summarizer: self.summarizer_agent.step([MessageCreate(role=MessageRole.user, content=[TextContent(text=summary_request_text)])]) ) - return [all_in_context_messages[0]] + updated_in_context_messages, True + return [all_in_context_messages[0], *updated_in_context_messages], True def simple_formatter( @@ -482,7 +484,7 @@ async def simple_summary( agent_tags=agent_tags, run_id=run_id, step_id=step_id, - call_type="summarization", + call_type=LLMCallType.summarization, org_id=actor.organization_id if actor else None, user_id=actor.id if actor else None, compaction_settings=compaction_settings, @@ -493,7 +495,7 @@ async def simple_summary( # Build the initial transcript without clamping to preserve fidelity # TODO proactively clip here? 
summary_transcript = simple_formatter(messages) - logger.info(f"Summarizing {len(messages)} messages with prompt: {system_prompt}") + logger.info(f"Summarizing {len(messages)} messages with prompt: {system_prompt[:100]}...") if include_ack: logger.info(f"Summarizing with ACK for model {llm_config.model}") @@ -517,81 +519,13 @@ async def simple_summary( summarizer_llm_config.put_inner_thoughts_in_kwargs = False summarizer_llm_config.enable_reasoner = False - async def _run_summarizer_request(req_data: dict, req_messages_obj: list[Message]) -> str: - """Run summarization request and return assistant text. - - For Anthropic, use provider-side streaming to avoid long-request failures - (Anthropic requires streaming for requests that may exceed ~10 minutes). - """ - - if summarizer_llm_config.model_endpoint_type in [ProviderType.anthropic, ProviderType.bedrock]: - logger.info( - "Summarizer: using provider streaming (%s/%s) to avoid long-request failures", - summarizer_llm_config.model_endpoint_type, - summarizer_llm_config.model, - ) - # Stream from provider and accumulate the final assistant text. - from letta.interfaces.anthropic_parallel_tool_call_streaming_interface import ( - SimpleAnthropicStreamingInterface, - ) - - interface = SimpleAnthropicStreamingInterface( - requires_approval_tools=[], - run_id=None, - step_id=None, - ) - - # AnthropicClient.stream_async sets request_data["stream"] = True internally. - stream = await llm_client.stream_async_with_telemetry(req_data, summarizer_llm_config) - async for _chunk in interface.process(stream): - # We don't emit anything; we just want the fully-accumulated content. 
- pass - - content_parts = interface.get_content() - text = "".join(part.text for part in content_parts if isinstance(part, TextContent)).strip() - - # Log telemetry after stream processing - await llm_client.log_provider_trace_async( - request_data=req_data, - response_json={ - "content": text, - "model": summarizer_llm_config.model, - "usage": { - "input_tokens": getattr(interface, "input_tokens", None), - "output_tokens": getattr(interface, "output_tokens", None), - }, - }, - ) - - if not text: - logger.warning("No content returned from summarizer (streaming path)") - raise Exception("Summary failed to generate") - return text - - # Default: non-streaming provider request, then normalize via chat-completions conversion. - logger.debug( - "Summarizer: using non-streaming request (%s/%s)", - summarizer_llm_config.model_endpoint_type, - summarizer_llm_config.model, - ) - response_data = await llm_client.request_async_with_telemetry(req_data, summarizer_llm_config) - response = await llm_client.convert_response_to_chat_completion( - response_data, - req_messages_obj, - summarizer_llm_config, - ) - if response.choices[0].message.content is None: - logger.warning("No content returned from summarizer") - raise Exception("Summary failed to generate") - return response.choices[0].message.content.strip() - request_data = llm_client.build_request_data(AgentType.letta_v1_agent, input_messages_obj, summarizer_llm_config, tools=[]) try: - summary = await _run_summarizer_request(request_data, input_messages_obj) + summary = await _run_summarizer_request(request_data, input_messages_obj, summarizer_llm_config, llm_client) except Exception as e: # handle LLM error (likely a context window exceeded error) try: - raise llm_client.handle_llm_error(e) + raise llm_client.handle_llm_error(e, llm_config=llm_config) except ContextWindowExceededError as context_error: logger.warning(f"Context window exceeded during summarization. Applying clamping fallbacks. 
Original error: {context_error}") @@ -625,7 +559,7 @@ async def simple_summary( ) try: - summary = await _run_summarizer_request(request_data, input_messages_obj) + summary = await _run_summarizer_request(request_data, input_messages_obj, summarizer_llm_config, llm_client) except Exception as fallback_error_a: # Fallback B: hard-truncate the user transcript to fit a conservative char budget logger.warning(f"Clamped tool returns still overflowed ({fallback_error_a}). Falling back to transcript truncation.") @@ -662,11 +596,11 @@ async def simple_summary( tools=[], ) try: - summary = await _run_summarizer_request(request_data, input_messages_obj) + summary = await _run_summarizer_request(request_data, input_messages_obj, summarizer_llm_config, llm_client) except Exception as fallback_error_b: logger.error(f"Transcript truncation fallback also failed: {fallback_error_b}. Propagating error.") logger.info(f"Full fallback summarization payload: {request_data}") - raise llm_client.handle_llm_error(fallback_error_b) + raise llm_client.handle_llm_error(fallback_error_b, llm_config=llm_config) logger.info(f"Summarized {len(messages)}: {summary}") @@ -731,3 +665,84 @@ def format_transcript(messages: List[Message], include_system: bool = False) -> lines.append(f"{role}: {text}") return lines + + +@trace_method +async def _run_summarizer_request(req_data: dict, req_messages_obj: list[Message], llm_config: LLMConfig, llm_client: LLMClient) -> str: + """Run summarization request and return assistant text. + + For Anthropic, use provider-side streaming to avoid long-request failures + (Anthropic requires streaming for requests that may exceed ~10 minutes). + """ + + if llm_config.model_endpoint_type in [ProviderType.anthropic, ProviderType.bedrock]: + logger.info( + "Summarizer: using provider streaming (%s/%s) to avoid long-request failures", + llm_config.model_endpoint_type, + llm_config.model, + ) + # Stream from provider and accumulate the final assistant text. 
+ from letta.interfaces.anthropic_parallel_tool_call_streaming_interface import ( + SimpleAnthropicStreamingInterface, + ) + + interface = SimpleAnthropicStreamingInterface( + requires_approval_tools=[], + run_id=None, + step_id=None, + ) + + # AnthropicClient.stream_async sets request_data["stream"] = True internally. + try: + stream = await llm_client.stream_async(req_data, llm_config) + async for _chunk in interface.process(stream): + pass + + content_parts = interface.get_content() + text = "".join(part.text for part in content_parts if isinstance(part, TextContent)).strip() + + await llm_client.log_provider_trace_async( + request_data=req_data, + response_json={ + "content": text, + "model": llm_config.model, + "usage": { + "input_tokens": getattr(interface, "input_tokens", None), + "output_tokens": getattr(interface, "output_tokens", None), + "cache_read_input_tokens": getattr(interface, "cache_read_tokens", 0), # cache read + "cache_creation_input_tokens": getattr(interface, "cache_creation_tokens", 0), # cache write + }, + }, + llm_config=llm_config, + ) + except Exception as e: + await llm_client.log_provider_trace_async( + request_data=req_data, + response_json=None, + llm_config=llm_config, + error_msg=str(e), + error_type=type(e).__name__, + ) + raise + + if not text: + logger.warning("No content returned from summarizer (streaming path)") + raise Exception("Summary failed to generate") + return text + + # Default: non-streaming provider request, then normalize via chat-completions conversion. 
+ logger.debug( + "Summarizer: using non-streaming request (%s/%s)", + llm_config.model_endpoint_type, + llm_config.model, + ) + response_data = await llm_client.request_async_with_telemetry(req_data, llm_config) + response = await llm_client.convert_response_to_chat_completion( + response_data, + req_messages_obj, + llm_config, + ) + if response.choices[0].message.content is None: + logger.warning("No content returned from summarizer") + raise Exception("Summary failed to generate") + return response.choices[0].message.content.strip() diff --git a/letta/services/summarizer/summarizer_all.py b/letta/services/summarizer/summarizer_all.py index fc183214..aa3818d0 100644 --- a/letta/services/summarizer/summarizer_all.py +++ b/letta/services/summarizer/summarizer_all.py @@ -79,4 +79,4 @@ async def summarize_all( logger.warning(f"Summary length {len(summary_message_str)} exceeds clip length {summarizer_config.clip_chars}. Truncating.") summary_message_str = summary_message_str[: summarizer_config.clip_chars] + "... 
[summary truncated to fit]" - return summary_message_str, [in_context_messages[0]] + protected_messages + return summary_message_str, [in_context_messages[0], *protected_messages] diff --git a/letta/services/summarizer/summarizer_config.py b/letta/services/summarizer/summarizer_config.py index 540464fa..d130d04c 100644 --- a/letta/services/summarizer/summarizer_config.py +++ b/letta/services/summarizer/summarizer_config.py @@ -2,24 +2,47 @@ from typing import Literal from pydantic import BaseModel, Field -from letta.prompts.summarizer_prompt import ANTHROPIC_SUMMARY_PROMPT, SHORTER_SUMMARY_PROMPT +from letta.prompts.summarizer_prompt import ALL_PROMPT, SELF_ALL_PROMPT, SELF_SLIDING_PROMPT, SLIDING_PROMPT +from letta.schemas.enums import ProviderType from letta.schemas.model import ModelSettingsUnion from letta.settings import summarizer_settings +def get_default_summarizer_model(provider_type: ProviderType) -> str | None: + """Get default model for summarization for given provider type.""" + summarizer_defaults = { + ProviderType.anthropic: "anthropic/claude-haiku-4-5-20251001", + ProviderType.openai: "openai/gpt-5-mini", + ProviderType.google_ai: "google_ai/gemini-2.5-flash", + } + return summarizer_defaults.get(provider_type) + + +def get_default_prompt_for_mode(mode: Literal["all", "sliding_window", "self_compact_all", "self_compact_sliding_window"]) -> str: + """Get the default prompt for a given compaction mode. + Also used in /summarize endpoint if mode is changed and prompt is not explicitly set.""" + if mode == "self_compact_sliding_window": + return SELF_SLIDING_PROMPT + elif mode == "self_compact_all": + return SELF_ALL_PROMPT + elif mode == "sliding_window": + return SLIDING_PROMPT + else: # all + return ALL_PROMPT + + class CompactionSettings(BaseModel): """Configuration for conversation compaction / summarization. - ``model`` is the only required user-facing field – it specifies the summarizer - model handle (e.g. ``"openai/gpt-4o-mini"``). 
Per-model settings (temperature, + Per-model settings (temperature, max tokens, etc.) are derived from the default configuration for that handle. """ # Summarizer model handle (provider/model-name). - # This is required whenever compaction_settings is provided. - model: str = Field( - ..., - description="Model handle to use for summarization (format: provider/model-name).", + # If None, uses lightweight provider-specific defaults (e.g., haiku for Anthropic, gpt-5-mini for OpenAI). + model: str | None = Field( + default=None, + description="Model handle to use for sliding_window/all summarization (format: provider/model-name). If None, uses lightweight provider-specific defaults.", ) # Optional provider-specific model settings for the summarizer model @@ -28,7 +51,7 @@ class CompactionSettings(BaseModel): description="Optional model settings used to override defaults for the summarizer model.", ) - prompt: str = Field(default=SHORTER_SUMMARY_PROMPT, description="The prompt to use for summarization.") + prompt: str | None = Field(default=None, description="The prompt to use for summarization. If None, uses mode-specific default.") prompt_acknowledgement: bool = Field( default=False, description="Whether to include an acknowledgement post-prompt (helps prevent non-summary outputs)." ) @@ -36,8 +59,17 @@ class CompactionSettings(BaseModel): default=50000, description="The maximum length of the summary in characters. If none, no clipping is performed." ) - mode: Literal["all", "sliding_window"] = Field(default="sliding_window", description="The type of summarization technique use.") + mode: Literal["all", "sliding_window", "self_compact_all", "self_compact_sliding_window"] = Field( + default="sliding_window", description="The type of summarization technique use." 
+ ) sliding_window_percentage: float = Field( default_factory=lambda: summarizer_settings.partial_evict_summarizer_percentage, - description="The percentage of the context window to keep post-summarization (only used in sliding window mode).", + description="The percentage of the context window to keep post-summarization (only used in sliding window modes).", ) + + # Called upon agent creation and if mode is changed in summarize endpoint request + def set_mode_specific_prompt(self): + """Set mode-specific default prompt if none provided.""" + if self.prompt is None: + self.prompt = get_default_prompt_for_mode(self.mode) + return self diff --git a/letta/services/summarizer/summarizer_sliding_window.py b/letta/services/summarizer/summarizer_sliding_window.py index 87739393..b271c3c1 100644 --- a/letta/services/summarizer/summarizer_sliding_window.py +++ b/letta/services/summarizer/summarizer_sliding_window.py @@ -1,19 +1,17 @@ -from typing import List, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple + +if TYPE_CHECKING: + from letta.schemas.tool import Tool -from letta.helpers.message_helper import convert_message_creates_to_messages from letta.log import get_logger from letta.otel.tracing import trace_method -from letta.schemas.agent import AgentState from letta.schemas.enums import MessageRole -from letta.schemas.letta_message_content import TextContent from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message, MessageCreate +from letta.schemas.message import Message from letta.schemas.user import User from letta.services.context_window_calculator.token_counter import create_token_counter -from letta.services.message_manager import MessageManager from letta.services.summarizer.summarizer import simple_summary from letta.services.summarizer.summarizer_config import CompactionSettings -from letta.system import package_summarize_message_no_counts logger = get_logger(__name__) @@ -42,12 +40,67 @@ async def 
count_tokens(actor: User, llm_config: LLMConfig, messages: List[Messag return tokens +async def count_tokens_with_tools( + actor: User, + llm_config: LLMConfig, + messages: List[Message], + tools: Optional[List["Tool"]] = None, +) -> int: + """Count tokens in messages AND tool definitions. + + This provides a more accurate context token count by including tool definitions, + which are sent to the LLM but not included in the messages list. + + Args: + actor: The user making the request. + llm_config: The LLM configuration for selecting the appropriate tokenizer. + messages: The in-context messages (including system message). + tools: Optional list of Tool objects. If provided, their schemas are counted. + + Returns: + Total token count for messages + tools. + """ + # Delegate message counting to existing function + message_tokens = await count_tokens(actor, llm_config, messages) + + if not tools: + return message_tokens + + # Count tools + from openai.types.beta.function_tool import FunctionTool as OpenAITool + + from letta.services.context_window_calculator.token_counter import ApproxTokenCounter + + token_counter = create_token_counter( + model_endpoint_type=llm_config.model_endpoint_type, + model=llm_config.model, + actor=actor, + ) + + # Tools can be either Tool objects (with .json_schema) or dicts (json schemas directly) + # For compatibility with how tools need to be passed in for self compaction + tool_definitions = [ + OpenAITool(type="function", function=t.json_schema if hasattr(t, "json_schema") else t) + for t in tools + if (hasattr(t, "json_schema") and t.json_schema) or (isinstance(t, dict) and t) + ] + tool_tokens = await token_counter.count_tool_tokens(tool_definitions) if tool_definitions else 0 + + # Apply safety margin for approximate counting (message_tokens already has margin applied) + if isinstance(token_counter, ApproxTokenCounter): + tool_tokens = int(tool_tokens * APPROX_TOKEN_SAFETY_MARGIN) + + return message_tokens + tool_tokens + + 
@trace_method async def summarize_via_sliding_window( # Required to tag LLM calls actor: User, - # Actual summarization configuration + # LLM config for the summarizer model (used to generate the summary) llm_config: LLMConfig, + # LLM config for the agent model (used to determine context window cutoff for eviction) + agent_llm_config: LLMConfig, summarizer_config: CompactionSettings, in_context_messages: List[Message], # Telemetry context @@ -60,10 +113,10 @@ async def summarize_via_sliding_window( If the total tokens is greater than the context window limit (or force=True), then summarize and rearrange the in-context messages (with the summary in front). - Finding the summarization cutoff point (target of final post-summarize count is N% of configured context window): + Finding the summarization cutoff point (target of final post-summarize count is N% of agent's context window): 1. Start at a message index cutoff (1-N%) 2. Count tokens with system prompt, prior summary (if it exists), and messages past cutoff point (messages[0] + messages[cutoff:]) - 3. Is count(post_sum_messages) <= N% of configured context window? + 3. Is count(post_sum_messages) <= N% of agent's context window? 3a. Yes -> create new summary with [prior summary, cutoff:], and safety truncate summary with char count 3b. 
No -> increment cutoff by 10%, and repeat @@ -80,23 +133,31 @@ async def summarize_via_sliding_window( else: maximum_message_index = total_message_count - 1 - # Starts at N% (eg 70%), and increments up until 100% - message_count_cutoff_percent = max( - 1 - summarizer_config.sliding_window_percentage, 0.10 - ) # Some arbitrary minimum value (10%) to avoid negatives from badly configured summarizer percentage - eviction_percentage = summarizer_config.sliding_window_percentage - assert summarizer_config.sliding_window_percentage <= 1.0, "Sliding window percentage must be less than or equal to 1.0" - assistant_message_index = None - approx_token_count = llm_config.context_window - # valid_cutoff_roles = {MessageRole.assistant, MessageRole.approval} - valid_cutoff_roles = {MessageRole.assistant} - # simple version: summarize(in_context[1:round(summarizer_config.sliding_window_percentage * len(in_context_messages))]) # this evicts 30% of the messages (via summarization) and keeps the remaining 70% # problem: we need the cutoff point to be an assistant message, so will grow the cutoff point until we find an assistant message # also need to grow the cutoff point until the token count is less than the target token count - while approx_token_count >= (1 - summarizer_config.sliding_window_percentage) * llm_config.context_window and eviction_percentage < 1.0: + # Starts at N% (eg 70%), and increments up until 100% + max( + 1 - summarizer_config.sliding_window_percentage, 0.10 + ) # Some arbitrary minimum value (10%) to avoid negatives from badly configured summarizer percentage + eviction_percentage = summarizer_config.sliding_window_percentage + assert summarizer_config.sliding_window_percentage <= 1.0, "Sliding window percentage must be less than or equal to 1.0" + assistant_message_index = None + + goal_tokens = (1 - summarizer_config.sliding_window_percentage) * agent_llm_config.context_window + approx_token_count = agent_llm_config.context_window + + # allow approvals to 
be cutoffs (for headless agents) but ensure proper grouping with tool calls + def is_valid_cutoff(message: Message): + if message.role == MessageRole.assistant: + return True + if message.role == MessageRole.approval: + return message.tool_calls is not None and len(message.tool_calls) > 0 + return False + + while approx_token_count >= goal_tokens and eviction_percentage < 1.0: # more eviction percentage eviction_percentage += 0.10 @@ -108,20 +169,22 @@ async def summarize_via_sliding_window( ( i for i in reversed(range(1, message_cutoff_index + 1)) - if i < len(in_context_messages) and in_context_messages[i].role in valid_cutoff_roles + if i < len(in_context_messages) and is_valid_cutoff(in_context_messages[i]) ), None, ) if assistant_message_index is None: - logger.warning(f"No assistant message found for evicting up to index {message_cutoff_index}, incrementing eviction percentage") + logger.warning( + f"No assistant/approval message found for evicting up to index {message_cutoff_index}, incrementing eviction percentage" + ) continue # update token count logger.info(f"Attempting to compact messages index 1:{assistant_message_index} messages") - post_summarization_buffer = [system_prompt] + in_context_messages[assistant_message_index:] - approx_token_count = await count_tokens(actor, llm_config, post_summarization_buffer) + post_summarization_buffer = [system_prompt, *in_context_messages[assistant_message_index:]] + approx_token_count = await count_tokens(actor, agent_llm_config, post_summarization_buffer) logger.info( - f"Compacting messages index 1:{assistant_message_index} messages resulted in {approx_token_count} tokens, goal is {(1 - summarizer_config.sliding_window_percentage) * llm_config.context_window}" + f"Compacting messages index 1:{assistant_message_index} messages resulted in {approx_token_count} tokens, goal is {goal_tokens}" ) if assistant_message_index is None or eviction_percentage >= 1.0: @@ -155,9 +218,11 @@ async def 
summarize_via_sliding_window( }, ) + logger.info(f"\n==================\nSummary message string: {summary_message_str[:100]}...\n==================\n") + if summarizer_config.clip_chars is not None and len(summary_message_str) > summarizer_config.clip_chars: logger.warning(f"Summary length {len(summary_message_str)} exceeds clip length {summarizer_config.clip_chars}. Truncating.") summary_message_str = summary_message_str[: summarizer_config.clip_chars] + "... [summary truncated to fit]" updated_in_context_messages = in_context_messages[assistant_message_index:] - return summary_message_str, [system_prompt] + updated_in_context_messages + return summary_message_str, [system_prompt, *updated_in_context_messages] diff --git a/letta/services/telemetry_manager.py b/letta/services/telemetry_manager.py index deddb20b..c7291bfd 100644 --- a/letta/services/telemetry_manager.py +++ b/letta/services/telemetry_manager.py @@ -20,10 +20,10 @@ class TelemetryManager: Supports multiple backends for dual-write scenarios (e.g., migration). Configure via LETTA_TELEMETRY_PROVIDER_TRACE_BACKEND (comma-separated): - postgres: Store in PostgreSQL (default) - - clickhouse: Store in ClickHouse via OTEL instrumentation - - socket: Store via Unix socket to Crouton sidecar (which writes to GCS) + - clickhouse: Store in ClickHouse (reads and writes from llm_traces table) + - socket: Store via Unix socket to external sidecar - Example: LETTA_TELEMETRY_PROVIDER_TRACE_BACKEND=postgres,socket + Example: LETTA_TELEMETRY_PROVIDER_TRACE_BACKEND=postgres,clickhouse Multi-backend behavior: - Writes: Sent to ALL configured backends concurrently via asyncio.gather. 
diff --git a/letta/services/tool_executor/core_tool_executor.py b/letta/services/tool_executor/core_tool_executor.py index 043debba..d5b7bf76 100644 --- a/letta/services/tool_executor/core_tool_executor.py +++ b/letta/services/tool_executor/core_tool_executor.py @@ -8,13 +8,11 @@ from letta.constants import ( READ_ONLY_BLOCK_EDIT_ERROR, RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE, ) -from letta.helpers.json_helpers import json_dumps -from letta.helpers.tpuf_client import should_use_tpuf_for_messages from letta.log import get_logger from letta.orm.errors import NoResultFound from letta.schemas.agent import AgentState from letta.schemas.block import BlockUpdate -from letta.schemas.enums import MessageRole, TagMatchMode +from letta.schemas.enums import MessageRole from letta.schemas.sandbox_config import SandboxConfig from letta.schemas.tool import Tool from letta.schemas.tool_execution_result import ToolExecutionResult @@ -318,14 +316,14 @@ class LettaCoreToolExecutor(ToolExecutor): await self.agent_manager.rebuild_system_prompt_async(agent_id=agent_state.id, actor=actor, force=True) return None - async def core_memory_append(self, agent_state: AgentState, actor: User, label: str, content: str) -> Optional[str]: + async def core_memory_append(self, agent_state: AgentState, actor: User, label: str, content: str) -> str: if agent_state.memory.get_block(label).read_only: raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}") current_value = str(agent_state.memory.get_block(label).value) new_value = current_value + "\n" + str(content) agent_state.memory.update_block_value(label=label, value=new_value) await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor) - return None + return new_value async def core_memory_replace( self, @@ -334,7 +332,7 @@ class LettaCoreToolExecutor(ToolExecutor): label: str, old_content: str, new_content: str, - ) -> Optional[str]: + ) -> str: if agent_state.memory.get_block(label).read_only: 
raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}") current_value = str(agent_state.memory.get_block(label).value) @@ -343,73 +341,64 @@ class LettaCoreToolExecutor(ToolExecutor): new_value = current_value.replace(str(old_content), str(new_content)) agent_state.memory.update_block_value(label=label, value=new_value) await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor) - return None + return new_value - async def memory_replace(self, agent_state: AgentState, actor: User, label: str, old_str: str, new_str: str) -> str: + async def memory_replace( + self, + agent_state: AgentState, + actor: User, + label: str, + old_string: str, + new_string: str, + ) -> str: if agent_state.memory.get_block(label).read_only: raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}") - if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(old_str)): + if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(old_string)): raise ValueError( - "old_str contains a line number prefix, which is not allowed. " + "old_string contains a line number prefix, which is not allowed. " "Do not include line numbers when calling memory tools (line " "numbers are for display purposes only)." ) - if CORE_MEMORY_LINE_NUMBER_WARNING in old_str: + if CORE_MEMORY_LINE_NUMBER_WARNING in old_string: raise ValueError( - "old_str contains a line number warning, which is not allowed. " + "old_string contains a line number warning, which is not allowed. " "Do not include line number information when calling memory tools " "(line numbers are for display purposes only)." ) - if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_str)): + if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_string)): raise ValueError( - "new_str contains a line number prefix, which is not allowed. " + "new_string contains a line number prefix, which is not allowed. " "Do not include line numbers when calling memory tools (line " "numbers are for display purposes only)." 
) - old_str = str(old_str).expandtabs() - new_str = str(new_str).expandtabs() + old_string = str(old_string).expandtabs() + new_string = str(new_string).expandtabs() current_value = str(agent_state.memory.get_block(label).value).expandtabs() - # Check if old_str is unique in the block - occurences = current_value.count(old_str) + # Check if old_string is unique in the block + occurences = current_value.count(old_string) if occurences == 0: raise ValueError( - f"No replacement was performed, old_str `{old_str}` did not appear verbatim in memory block with label `{label}`." + f"No replacement was performed, old_string `{old_string}` did not appear verbatim in memory block with label `{label}`." ) elif occurences > 1: content_value_lines = current_value.split("\n") - lines = [idx + 1 for idx, line in enumerate(content_value_lines) if old_str in line] + lines = [idx + 1 for idx, line in enumerate(content_value_lines) if old_string in line] raise ValueError( - f"No replacement was performed. Multiple occurrences of old_str `{old_str}` in lines {lines}. Please ensure it is unique." + f"No replacement was performed. Multiple occurrences of old_string `{old_string}` in lines {lines}. Please ensure it is unique." 
) - # Replace old_str with new_str - new_value = current_value.replace(str(old_str), str(new_str)) + # Replace old_string with new_string + new_value = current_value.replace(str(old_string), str(new_string)) # Write the new content to the block agent_state.memory.update_block_value(label=label, value=new_value) await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor) - # Create a snippet of the edited section - SNIPPET_LINES = 3 - replacement_line = current_value.split(old_str)[0].count("\n") - start_line = max(0, replacement_line - SNIPPET_LINES) - end_line = replacement_line + SNIPPET_LINES + new_str.count("\n") - snippet = "\n".join(new_value.split("\n")[start_line : end_line + 1]) - - # Prepare the success message - success_msg = ( - f"The core memory block with label `{label}` has been successfully edited. " - f"Your system prompt has been recompiled with the updated memory contents and is now active in your context. " - f"Review the changes and make sure they are as expected (correct indentation, " - f"no duplicate lines, etc). Edit the memory block again if necessary." - ) - - # return None - return success_msg + return new_value async def memory_apply_patch(self, agent_state: AgentState, actor: User, label: str, patch: str) -> str: """Apply a simplified unified-diff style patch to one or more memory blocks. @@ -545,12 +534,7 @@ class LettaCoreToolExecutor(ToolExecutor): agent_state.memory.update_block_value(label=label, value=new_value) await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor) - return ( - f"The core memory block with label `{label}` has been successfully edited. " - f"Your system prompt has been recompiled with the updated memory contents and is now active in your context. " - f"Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). 
" - f"Edit the memory block again if necessary." - ) + return new_value # Extended mode: parse codex-like patch operations for memory blocks lines = patch.splitlines() @@ -642,7 +626,7 @@ class LettaCoreToolExecutor(ToolExecutor): if kind == "add": try: - existing = agent_state.memory.get_block(action["label"]) + agent_state.memory.get_block(action["label"]) # If we get here, the block exists raise ValueError(f"Error: Memory block '{action['label']}' already exists") except KeyError: @@ -701,27 +685,27 @@ class LettaCoreToolExecutor(ToolExecutor): agent_state: AgentState, actor: User, label: str, - new_str: str, + new_string: str, insert_line: int = -1, ) -> str: if agent_state.memory.get_block(label).read_only: raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}") - if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_str)): + if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_string)): raise ValueError( - "new_str contains a line number prefix, which is not allowed. Do not " + "new_string contains a line number prefix, which is not allowed. Do not " "include line numbers when calling memory tools (line numbers are for " "display purposes only)." ) - if CORE_MEMORY_LINE_NUMBER_WARNING in new_str: + if CORE_MEMORY_LINE_NUMBER_WARNING in new_string: raise ValueError( - "new_str contains a line number warning, which is not allowed. Do not " + "new_string contains a line number warning, which is not allowed. Do not " "include line number information when calling memory tools (line numbers " "are for display purposes only)." 
) current_value = str(agent_state.memory.get_block(label).value).expandtabs() - new_str = str(new_str).expandtabs() + new_string = str(new_string).expandtabs() current_value_lines = current_value.split("\n") n_lines = len(current_value_lines) @@ -737,32 +721,24 @@ class LettaCoreToolExecutor(ToolExecutor): # Insert the new string as a line SNIPPET_LINES = 3 - new_str_lines = new_str.split("\n") - new_value_lines = current_value_lines[:insert_line] + new_str_lines + current_value_lines[insert_line:] + new_string_lines = new_string.split("\n") + new_value_lines = current_value_lines[:insert_line] + new_string_lines + current_value_lines[insert_line:] snippet_lines = ( current_value_lines[max(0, insert_line - SNIPPET_LINES) : insert_line] - + new_str_lines + + new_string_lines + current_value_lines[insert_line : insert_line + SNIPPET_LINES] ) # Collate into the new value to update new_value = "\n".join(new_value_lines) - snippet = "\n".join(snippet_lines) + "\n".join(snippet_lines) # Write into the block agent_state.memory.update_block_value(label=label, value=new_value) await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor) - # Prepare the success message - success_msg = ( - f"The core memory block with label `{label}` has been successfully edited. " - f"Your system prompt has been recompiled with the updated memory contents and is now active in your context. " - f"Review the changes and make sure they are as expected (correct indentation, " - f"no duplicate lines, etc). Edit the memory block again if necessary." 
- ) - - return success_msg + return new_value async def memory_rethink(self, agent_state: AgentState, actor: User, label: str, new_memory: str) -> str: if agent_state.memory.get_block(label).read_only: @@ -794,16 +770,7 @@ class LettaCoreToolExecutor(ToolExecutor): await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor) - # Prepare the success message - success_msg = ( - f"The core memory block with label `{label}` has been successfully edited. " - f"Your system prompt has been recompiled with the updated memory contents and is now active in your context. " - f"Review the changes and make sure they are as expected (correct indentation, " - f"no duplicate lines, etc). Edit the memory block again if necessary." - ) - - # return None - return success_msg + return new_memory async def memory_finish_edits(self, agent_state: AgentState, actor: User) -> None: return None @@ -914,7 +881,14 @@ class LettaCoreToolExecutor(ToolExecutor): f"Your system prompt has been recompiled with the new memory block and is now active in your context." ) - async def memory_str_replace(self, agent_state: AgentState, actor: User, path: str, old_str: str, new_str: str) -> str: + async def memory_str_replace( + self, + agent_state: AgentState, + actor: User, + path: str, + old_string: str, + new_string: str, + ) -> str: """Replace text in a memory block.""" label = path.removeprefix("/memories/").removeprefix("/") @@ -925,58 +899,54 @@ class LettaCoreToolExecutor(ToolExecutor): if memory_block.read_only: raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}") - if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(old_str)): + if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(old_string)): raise ValueError( - "old_str contains a line number prefix, which is not allowed. " + "old_string contains a line number prefix, which is not allowed. 
" "Do not include line numbers when calling memory tools (line " "numbers are for display purposes only)." ) - if CORE_MEMORY_LINE_NUMBER_WARNING in old_str: + if CORE_MEMORY_LINE_NUMBER_WARNING in old_string: raise ValueError( - "old_str contains a line number warning, which is not allowed. " + "old_string contains a line number warning, which is not allowed. " "Do not include line number information when calling memory tools " "(line numbers are for display purposes only)." ) - if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_str)): + if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_string)): raise ValueError( - "new_str contains a line number prefix, which is not allowed. " + "new_string contains a line number prefix, which is not allowed. " "Do not include line numbers when calling memory tools (line " "numbers are for display purposes only)." ) - old_str = str(old_str).expandtabs() - new_str = str(new_str).expandtabs() + old_string = str(old_string).expandtabs() + new_string = str(new_string).expandtabs() current_value = str(memory_block.value).expandtabs() - # Check if old_str is unique in the block - occurences = current_value.count(old_str) + # Check if old_string is unique in the block + occurences = current_value.count(old_string) if occurences == 0: raise ValueError( - f"No replacement was performed, old_str `{old_str}` did not appear verbatim in memory block with label `{label}`." + f"No replacement was performed, old_string `{old_string}` did not appear verbatim in memory block with label `{label}`." ) elif occurences > 1: content_value_lines = current_value.split("\n") - lines = [idx + 1 for idx, line in enumerate(content_value_lines) if old_str in line] + lines = [idx + 1 for idx, line in enumerate(content_value_lines) if old_string in line] raise ValueError( - f"No replacement was performed. Multiple occurrences of old_str `{old_str}` in lines {lines}. Please ensure it is unique." + f"No replacement was performed. 
Multiple occurrences of old_string `{old_string}` in lines {lines}. Please ensure it is unique." ) - # Replace old_str with new_str - new_value = current_value.replace(str(old_str), str(new_str)) + # Replace old_string with new_string + new_value = current_value.replace(str(old_string), str(new_string)) # Write the new content to the block await self.block_manager.update_block_async(block_id=memory_block.id, block_update=BlockUpdate(value=new_value), actor=actor) + + # Keep in-memory AgentState consistent with DB + agent_state.memory.update_block_value(label=label, value=new_value) + await self.agent_manager.rebuild_system_prompt_async(agent_id=agent_state.id, actor=actor, force=True) - # Prepare the success message - success_msg = ( - f"The core memory block with label `{label}` has been successfully edited. " - f"Your system prompt has been recompiled with the updated memory contents and is now active in your context. " - f"Review the changes and make sure they are as expected (correct indentation, " - f"no duplicate lines, etc). Edit the memory block again if necessary." 
- ) - - return success_msg + return new_value async def memory_str_insert(self, agent_state: AgentState, actor: User, path: str, insert_text: str, insert_line: int = -1) -> str: """Insert text into a memory block at a specific line.""" @@ -1029,21 +999,17 @@ class LettaCoreToolExecutor(ToolExecutor): # Collate into the new value to update new_value = "\n".join(new_value_lines) - snippet = "\n".join(snippet_lines) + "\n".join(snippet_lines) # Write into the block await self.block_manager.update_block_async(block_id=memory_block.id, block_update=BlockUpdate(value=new_value), actor=actor) + + # Keep in-memory AgentState consistent with DB + agent_state.memory.update_block_value(label=label, value=new_value) + await self.agent_manager.rebuild_system_prompt_async(agent_id=agent_state.id, actor=actor, force=True) - # Prepare the success message - success_msg = ( - f"The core memory block with label `{label}` has been successfully edited. " - f"Your system prompt has been recompiled with the updated memory contents and is now active in your context. " - f"Review the changes and make sure they are as expected (correct indentation, " - f"no duplicate lines, etc). Edit the memory block again if necessary." 
- ) - - return success_msg + return new_value async def memory( self, diff --git a/letta/services/tool_executor/files_tool_executor.py b/letta/services/tool_executor/files_tool_executor.py index d05d42f3..ed0b40b9 100644 --- a/letta/services/tool_executor/files_tool_executor.py +++ b/letta/services/tool_executor/files_tool_executor.py @@ -189,7 +189,7 @@ class LettaFileToolExecutor(ToolExecutor): visible_content = "\n".join(content_lines) # Handle LRU eviction and file opening - closed_files, was_already_open, previous_ranges = await self.files_agents_manager.enforce_max_open_files_and_open( + closed_files, _was_already_open, previous_ranges = await self.files_agents_manager.enforce_max_open_files_and_open( agent_id=agent_state.id, file_id=file_id, file_name=file_name, @@ -683,7 +683,7 @@ class LettaFileToolExecutor(ToolExecutor): summary = f"Found {total_hits} matches in {file_count} file{'s' if file_count != 1 else ''} for query: '{query}'" # combine all results - formatted_results = [summary, "=" * len(summary)] + results + formatted_results = [summary, "=" * len(summary), *results] self.logger.info(f"Turbopuffer search completed: {total_hits} matches across {file_count} files") return "\n".join(formatted_results) @@ -780,7 +780,7 @@ class LettaFileToolExecutor(ToolExecutor): summary = f"Found {total_hits} Pinecone matches in {file_count} file{'s' if file_count != 1 else ''} for query: '{query}'" # Combine all results - formatted_results = [summary, "=" * len(summary)] + results + formatted_results = [summary, "=" * len(summary), *results] self.logger.info(f"Pinecone search completed: {total_hits} matches across {file_count} files") return "\n".join(formatted_results) @@ -846,7 +846,7 @@ class LettaFileToolExecutor(ToolExecutor): summary = f"Found {total_passages} semantic matches in {file_count} file{'s' if file_count != 1 else ''} for query: '{query}'" # Combine all results - formatted_results = [summary, "=" * len(summary)] + results + formatted_results = 
[summary, "=" * len(summary), *results] self.logger.info(f"Semantic search completed: {total_passages} matches across {file_count} files") diff --git a/letta/services/tool_executor/mcp_tool_executor.py b/letta/services/tool_executor/mcp_tool_executor.py index 3ea16161..b830dc01 100644 --- a/letta/services/tool_executor/mcp_tool_executor.py +++ b/letta/services/tool_executor/mcp_tool_executor.py @@ -65,12 +65,20 @@ class ExternalMCPToolExecutor(ToolExecutor): # Check if this is an expected MCP error (ToolError, McpError) # These are user-facing errors from the external MCP server (e.g., "No connected account found") # We handle them gracefully instead of letting them propagate as exceptions - if e.__class__.__name__ in MCP_EXPECTED_ERROR_CLASSES: - logger.info(f"MCP tool '{function_name}' returned expected error: {str(e)}") + + # Handle ExceptionGroup wrapping (Python 3.11+ async TaskGroup can wrap exceptions) + exception_to_check = e + if hasattr(e, "exceptions") and e.exceptions: + # If it's an ExceptionGroup with a single wrapped exception, unwrap it + if len(e.exceptions) == 1: + exception_to_check = e.exceptions[0] + + if exception_to_check.__class__.__name__ in MCP_EXPECTED_ERROR_CLASSES: + logger.info(f"MCP tool '{function_name}' returned expected error: {str(exception_to_check)}") error_message = get_friendly_error_msg( function_name=function_name, - exception_name=e.__class__.__name__, - exception_message=str(e), + exception_name=exception_to_check.__class__.__name__, + exception_message=str(exception_to_check), ) return ToolExecutionResult( status="error", diff --git a/letta/services/tool_executor/multi_agent_tool_executor.py b/letta/services/tool_executor/multi_agent_tool_executor.py index 8ecfc569..cdb55116 100644 --- a/letta/services/tool_executor/multi_agent_tool_executor.py +++ b/letta/services/tool_executor/multi_agent_tool_executor.py @@ -1,4 +1,3 @@ -import asyncio from typing import Any, Dict, List, Optional from letta.log import get_logger diff 
--git a/letta/services/tool_executor/sandbox_tool_executor.py b/letta/services/tool_executor/sandbox_tool_executor.py index ea5af641..2f903d8b 100644 --- a/letta/services/tool_executor/sandbox_tool_executor.py +++ b/letta/services/tool_executor/sandbox_tool_executor.py @@ -5,7 +5,7 @@ from letta.functions.ast_parsers import coerce_dict_args_by_annotations, get_fun from letta.log import get_logger from letta.otel.tracing import trace_method from letta.schemas.agent import AgentState -from letta.schemas.enums import SandboxType, ToolSourceType +from letta.schemas.enums import SandboxType from letta.schemas.sandbox_config import SandboxConfig from letta.schemas.tool import Tool from letta.schemas.tool_execution_result import ToolExecutionResult diff --git a/letta/services/tool_executor/tool_execution_manager.py b/letta/services/tool_executor/tool_execution_manager.py index bffce487..60d7f2a4 100644 --- a/letta/services/tool_executor/tool_execution_manager.py +++ b/letta/services/tool_executor/tool_execution_manager.py @@ -1,7 +1,7 @@ import asyncio import json import traceback -from typing import Any, Dict, Optional, Type +from typing import Any, ClassVar, Dict, Optional, Type from letta.constants import FUNCTION_RETURN_VALUE_TRUNCATED from letta.helpers.datetime_helpers import AsyncTimer @@ -33,7 +33,7 @@ from letta.utils import get_friendly_error_msg class ToolExecutorFactory: """Factory for creating appropriate tool executors based on tool type.""" - _executor_map: Dict[ToolType, Type[ToolExecutor]] = { + _executor_map: ClassVar[Dict[ToolType, Type[ToolExecutor]]] = { ToolType.LETTA_CORE: LettaCoreToolExecutor, ToolType.LETTA_MEMORY_CORE: LettaCoreToolExecutor, ToolType.LETTA_SLEEPTIME_CORE: LettaCoreToolExecutor, diff --git a/letta/services/tool_executor/tool_execution_sandbox.py b/letta/services/tool_executor/tool_execution_sandbox.py index 48b52fe8..ebc18dd6 100644 --- a/letta/services/tool_executor/tool_execution_sandbox.py +++ 
b/letta/services/tool_executor/tool_execution_sandbox.py @@ -7,7 +7,10 @@ import sys import tempfile import traceback import uuid -from typing import Any, Dict, Optional +from typing import TYPE_CHECKING, Any, Dict, Optional + +if TYPE_CHECKING: + from e2b_code_interpreter import Execution, Sandbox from letta.functions.helpers import generate_model_from_args_json_schema from letta.log import get_logger @@ -256,7 +259,7 @@ class ToolExecutionSandbox: temp_file_path: str, ) -> ToolExecutionResult: status = "success" - func_return, agent_state, stderr = None, None, None + func_return, agent_state, _stderr = None, None, None old_stdout = sys.stdout old_stderr = sys.stderr diff --git a/letta/services/tool_manager.py b/letta/services/tool_manager.py index 0b46d2e9..3833d506 100644 --- a/letta/services/tool_manager.py +++ b/letta/services/tool_manager.py @@ -24,7 +24,7 @@ from letta.constants import ( MODAL_SAFE_IMPORT_MODULES, ) from letta.errors import LettaInvalidArgumentError, LettaToolNameConflictError, LettaToolNameSchemaMismatchError -from letta.functions.functions import derive_openai_json_schema, load_function_set +from letta.functions.functions import load_function_set from letta.helpers.tool_helpers import compute_tool_hash, generate_modal_function_name from letta.log import get_logger @@ -32,7 +32,6 @@ from letta.log import get_logger from letta.orm.errors import NoResultFound from letta.orm.tool import Tool as ToolModel from letta.otel.tracing import trace_method, tracer -from letta.schemas.agent import AgentState from letta.schemas.enums import PrimitiveType, SandboxType, ToolType from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate from letta.schemas.user import User as PydanticUser @@ -48,7 +47,7 @@ logger = get_logger(__name__) # NOTE: function name and nested modal function decorator name must stay in sync with MODAL_DEFAULT_TOOL_NAME -def modal_tool_wrapper(tool: PydanticTool, actor: PydanticUser, sandbox_env_vars: dict = None, 
project_id: str = "default"): +def modal_tool_wrapper(tool: PydanticTool, actor: PydanticUser, sandbox_env_vars: dict | None = None, project_id: str = "default"): """Create a Modal function wrapper for a tool""" import contextlib import io @@ -57,7 +56,6 @@ def modal_tool_wrapper(tool: PydanticTool, actor: PydanticUser, sandbox_env_vars from typing import Optional import modal - from letta_client import Letta packages = [str(req) for req in tool.pip_requirements] if tool.pip_requirements else [] for package in MODAL_SAFE_IMPORT_MODULES: @@ -185,7 +183,7 @@ def modal_tool_wrapper(tool: PydanticTool, actor: PydanticUser, sandbox_env_vars result = asyncio.run(tool_func(**kwargs)) else: result = tool_func(**kwargs) - except Exception as e: + except Exception: # Capture the exception and write to stderr error_occurred = True traceback.print_exc(file=stderr_capture) @@ -1297,7 +1295,6 @@ class ToolManager: @trace_method async def create_or_update_modal_app(self, tool: PydanticTool, actor: PydanticUser): """Create a Modal app with the tool function registered""" - import time import modal @@ -1345,7 +1342,7 @@ class ToolManager: """Delete a Modal app deployment for the tool""" try: # Generate the app name for this tool - modal_app_name = generate_modal_function_name(tool.id, actor.organization_id) + generate_modal_function_name(tool.id, actor.organization_id) # Try to delete the app # TODO: we need to soft delete, and then potentially stop via the CLI, no programmatic way to delete currently diff --git a/letta/services/tool_sandbox/base.py b/letta/services/tool_sandbox/base.py index 9b290f8c..42ff012d 100644 --- a/letta/services/tool_sandbox/base.py +++ b/letta/services/tool_sandbox/base.py @@ -202,7 +202,7 @@ class AsyncToolSandboxBase(ABC): lines.append("import asyncio") if inject_agent_state: - lines.extend(["import letta", "from letta import *"]) # noqa: F401 + lines.extend(["import letta", "from letta import *"]) # Import Letta client if available (wrapped in 
try/except for sandboxes without letta_client installed) if inject_letta_client: @@ -438,7 +438,7 @@ class AsyncToolSandboxBase(ABC): if isinstance(node, ast.AsyncFunctionDef) and node.name == self.tool.name: return True return False - except: + except Exception: return False def use_top_level_await(self) -> bool: diff --git a/letta/services/tool_sandbox/modal_sandbox.py b/letta/services/tool_sandbox/modal_sandbox.py index 4fd1cd6e..ba779f01 100644 --- a/letta/services/tool_sandbox/modal_sandbox.py +++ b/letta/services/tool_sandbox/modal_sandbox.py @@ -4,10 +4,6 @@ Model sandbox implementation, which configures on Modal App per tool. from typing import TYPE_CHECKING, Any, Dict, Optional -import modal -from e2b.sandbox.commands.command_handle import CommandExitException -from e2b_code_interpreter import AsyncSandbox - from letta.constants import MODAL_DEFAULT_TOOL_NAME from letta.log import get_logger from letta.otel.tracing import log_event, trace_method @@ -16,16 +12,13 @@ from letta.schemas.enums import SandboxType from letta.schemas.sandbox_config import SandboxConfig from letta.schemas.tool import Tool from letta.schemas.tool_execution_result import ToolExecutionResult -from letta.services.helpers.tool_parser_helper import parse_function_arguments, parse_stdout_best_effort -from letta.services.tool_manager import ToolManager from letta.services.tool_sandbox.base import AsyncToolSandboxBase from letta.types import JsonDict -from letta.utils import get_friendly_error_msg logger = get_logger(__name__) if TYPE_CHECKING: - from e2b_code_interpreter import Execution + pass class AsyncToolSandboxModal(AsyncToolSandboxBase): diff --git a/letta/services/tool_sandbox/modal_sandbox_v2.py b/letta/services/tool_sandbox/modal_sandbox_v2.py index 488df05f..fc608bdd 100644 --- a/letta/services/tool_sandbox/modal_sandbox_v2.py +++ b/letta/services/tool_sandbox/modal_sandbox_v2.py @@ -192,7 +192,7 @@ class AsyncToolSandboxModalV2(AsyncToolSandboxBase): 
log_event("modal_v2_deploy_already_exists", {"app_name": app_full_name, "version": version}) # Return the created app with the function attached return app - except: + except Exception: # App doesn't exist, need to deploy pass diff --git a/letta/services/tool_sandbox/safe_pickle.py b/letta/services/tool_sandbox/safe_pickle.py index b27ef985..86c36f69 100644 --- a/letta/services/tool_sandbox/safe_pickle.py +++ b/letta/services/tool_sandbox/safe_pickle.py @@ -184,7 +184,7 @@ def sanitize_for_pickle(obj: Any) -> Any: # Test if the value is pickleable pickle.dumps(value, protocol=PICKLE_PROTOCOL) sanitized[key] = value - except: + except Exception: sanitized[key] = str(value) return sanitized diff --git a/letta/services/webhook_service.py b/letta/services/webhook_service.py index 56509289..2f1e0544 100644 --- a/letta/services/webhook_service.py +++ b/letta/services/webhook_service.py @@ -1,6 +1,5 @@ import logging import os -from typing import Optional import httpx diff --git a/letta/settings.py b/letta/settings.py index f52fca3c..75bf2d06 100644 --- a/letta/settings.py +++ b/letta/settings.py @@ -6,9 +6,14 @@ from typing import Optional from pydantic import AliasChoices, Field from pydantic_settings import BaseSettings, SettingsConfigDict +# Load config file and apply to environment before settings are created +# This allows YAML config values to be picked up by pydantic-settings +from letta.config_file import apply_config_to_env from letta.schemas.enums import SandboxType from letta.services.summarizer.enums import SummarizationMode +apply_config_to_env() + # Define constants here to avoid circular import with letta.log DEFAULT_WRAPPER_NAME = "chatml" INNER_THOUGHTS_KWARG = "thinking" @@ -165,13 +170,23 @@ class ModelSettings(BaseSettings): anthropic_sonnet_1m: bool = Field( default=False, description=( - "Enable 1M-token context window for Claude Sonnet 4/4.5. When true, adds the" + "Enable 1M-token context window for Claude Sonnet 4/4.5/4.6. 
When true, adds the" " 'context-1m-2025-08-07' beta to Anthropic requests and sets model context_window" " to 1,000,000 instead of 200,000. Note: This feature is in beta and not available" " to all orgs; once GA, this flag can be removed and behavior can default to on." ), alias="ANTHROPIC_SONNET_1M", ) + anthropic_opus_1m: bool = Field( + default=False, + description=( + "Enable 1M-token context window for Claude Opus 4.6. When true, adds the" + " 'context-1m-2025-08-07' beta to Anthropic requests and sets model context_window" + " to 1,000,000 instead of 200,000. Note: This feature is in beta and not available" + " to all orgs; once GA, this flag can be removed and behavior can default to on." + ), + alias="ANTHROPIC_OPUS_1M", + ) # ollama ollama_base_url: Optional[str] = None @@ -260,7 +275,7 @@ class Settings(BaseSettings): # SSE Streaming keepalive settings enable_keepalive: bool = Field(True, description="Enable keepalive messages in SSE streams to prevent timeouts") - keepalive_interval: float = Field(50.0, description="Seconds between keepalive messages (default: 50)") + keepalive_interval: float = Field(20.0, description="Seconds between keepalive messages (default: 20)") # SSE Streaming cancellation settings enable_cancellation_aware_streaming: bool = Field(True, description="Enable cancellation aware streaming") @@ -291,6 +306,33 @@ class Settings(BaseSettings): plugin_register: Optional[str] = None + # Object storage (used for git-backed memory repos) + # + # Prefer configuring a single URI rather than multiple provider-specific env vars. + # Example: + # LETTA_OBJECT_STORE_URI="gs://my-bucket/repository?project=my-gcp-project" + object_store_uri: str | None = Field( + default=None, + validation_alias=AliasChoices("LETTA_OBJECT_STORE_URI"), + description="Object store URI for memory repositories (e.g., gs://bucket/prefix?project=...).", + ) + + # Optional overrides for URI query params. 
These are primarily useful for deployments + # where you want to keep the URI stable but inject environment-specific settings. + object_store_project: str | None = Field( + default=None, + validation_alias=AliasChoices("LETTA_OBJECT_STORE_PROJECT"), + description="Optional project override for object store clients (e.g., GCS project).", + ) + + # memfs service URL - when set, git memory operations are proxied to the memfs service + # instead of running locally. This enables separating git/GCS operations into a dedicated service. + memfs_service_url: str | None = Field( + default=None, + validation_alias=AliasChoices("LETTA_MEMFS_SERVICE_URL"), + description="URL of the memfs service (e.g., http://memfs-py:8285). When set, git memory operations use this service.", + ) + # multi agent settings multi_agent_send_message_max_retries: int = 3 multi_agent_send_message_timeout: int = 20 * 60 @@ -331,6 +373,13 @@ class Settings(BaseSettings): track_agent_run: bool = Field(default=True, description="Enable tracking agent run with cancellation support") track_provider_trace: bool = Field(default=True, description="Enable tracking raw llm request and response at each step") + # LLM trace storage for analytics (direct ClickHouse, bypasses OTEL for large payloads) + # TTL is configured in the ClickHouse DDL (default 90 days) + store_llm_traces: bool = Field( + default=False, + description="Enable storing LLM traces in ClickHouse for cost analytics", + ) + # FastAPI Application Settings uvicorn_workers: int = 1 uvicorn_reload: bool = False diff --git a/letta/streaming_interface.py b/letta/streaming_interface.py index 83d5e2c6..ec447cc7 100644 --- a/letta/streaming_interface.py +++ b/letta/streaming_interface.py @@ -334,7 +334,7 @@ class StreamingRefreshCLIInterface(AgentRefreshStreamingInterface): if self.separate_send_message and function_name == "send_message": try: message = json.loads(function_args)["message"] - except: + except Exception: prefix = '{\n "message": "' if 
len(function_args) < len(prefix): message = "..." diff --git a/letta/system.py b/letta/system.py index 95d919e3..bb3cb527 100644 --- a/letta/system.py +++ b/letta/system.py @@ -175,7 +175,7 @@ def package_system_message(system_message, timezone, message_type="system_alert" if "type" in message_json and message_json["type"] == message_type: logger.warning(f"Attempted to pack a system message that is already packed. Not packing: '{system_message}'") return system_message - except: + except Exception: pass # do nothing, expected behavior that the message is not JSON formatted_time = get_local_time(timezone=timezone) @@ -204,11 +204,24 @@ def package_summarize_message(summary, summary_message_count, hidden_message_cou return json_dumps(packaged_message) -def package_summarize_message_no_counts(summary, timezone): - context_message = ( - "Note: prior messages have been hidden from view due to conversation memory constraints.\n" - + f"The following is a summary of the previous messages:\n {summary}" - ) +def package_summarize_message_no_counts(summary, timezone, compaction_stats: dict | None = None, mode: str | None = None): + if mode and "sliding_window" in mode: # sliding_window, self_compact_sliding_window + if compaction_stats and "messages_count_before" in compaction_stats and "messages_count_after" in compaction_stats: + num_evicted = compaction_stats["messages_count_before"] - compaction_stats["messages_count_after"] + context_message = ( + f"Note: {num_evicted} messages from the beginning of the conversation have been hidden from view due to memory constraints.\n" + + f"The following is a summary of the previous messages:\n {summary}" + ) + else: + context_message = ( + "Note: prior messages from the beginning of the conversation have been hidden from view due to conversation memory constraints.\n" + + f"The following is a summary of the previous messages:\n {summary}" + ) + else: # all, self + context_message = ( + "Note: prior messages have been hidden from 
view due to conversation memory constraints.\n" + + f"The following is a summary of the previous messages:\n {summary}" + ) formatted_time = get_local_time(timezone=timezone) packaged_message = { @@ -217,6 +230,9 @@ def package_summarize_message_no_counts(summary, timezone): "time": formatted_time, } + if compaction_stats: + packaged_message["compaction_stats"] = compaction_stats + return json_dumps(packaged_message) @@ -257,7 +273,7 @@ def unpack_message(packed_message: str) -> str: message_json = json.loads(packed_message) if type(message_json) is not dict: return packed_message - except: + except Exception: return packed_message if "message" not in message_json: @@ -269,7 +285,7 @@ def unpack_message(packed_message: str) -> str: else: try: message_type = message_json["type"] - except: + except Exception: return packed_message if message_type != "user_message": diff --git a/letta/test_gemini.py b/letta/test_gemini.py new file mode 100644 index 00000000..ffb39634 --- /dev/null +++ b/letta/test_gemini.py @@ -0,0 +1,21 @@ +from letta_client import Letta + + +def create_agent() -> None: + client = Letta(base_url="http://localhost:8283") + + agent_state = client.agents.create( + name="test-gemini-3-pro-agent", + model="google_ai/gemini-3.1-pro-preview", + embedding="openai/text-embedding-3-small", + context_window_limit=16000, + ) + print("Created agent: ", agent_state) + + +def main(): + create_agent() + + +if __name__ == "__main__": + main() diff --git a/letta/types/__init__.py b/letta/types/__init__.py index b0f83c65..2ff19c97 100644 --- a/letta/types/__init__.py +++ b/letta/types/__init__.py @@ -1,4 +1,4 @@ -from typing import Any, TypeAlias +from typing import TypeAlias from pydantic import JsonValue diff --git a/letta/utils.py b/letta/utils.py index 3c5fe5a8..852b4670 100644 --- a/letta/utils.py +++ b/letta/utils.py @@ -17,11 +17,10 @@ from contextlib import contextmanager from datetime import datetime, timezone from functools import wraps from logging import 
Logger -from typing import Any, Callable, Coroutine, Optional, Union, _GenericAlias, get_args, get_origin, get_type_hints +from typing import Any, Callable, Optional, Union, _GenericAlias, get_args, get_origin, get_type_hints # type: ignore[attr-defined] from urllib.parse import urljoin, urlparse import demjson3 as demjson -import tiktoken from pathvalidate import sanitize_filename as pathvalidate_sanitize_filename from sqlalchemy import text @@ -491,6 +490,25 @@ def get_tool_call_id() -> str: return str(uuid.uuid4())[:TOOL_CALL_ID_MAX_LEN] +# Pattern for valid tool_call_id (required by Anthropic: ^[a-zA-Z0-9_-]+$) +TOOL_CALL_ID_PATTERN = re.compile(r"^[a-zA-Z0-9_-]+$") + + +def sanitize_tool_call_id(tool_id: str) -> str: + """Ensure tool_call_id matches cross-provider requirements: + - Anthropic: pattern ^[a-zA-Z0-9_-]+$ + - OpenAI: max length 29 characters + + Some models (e.g. Kimi via OpenRouter) generate IDs like 'Read:93' which + contain invalid characters. This sanitizes them for cross-provider compatibility. 
+ """ + # Replace invalid characters with underscores + if not TOOL_CALL_ID_PATTERN.match(tool_id): + tool_id = re.sub(r"[^a-zA-Z0-9_-]", "_", tool_id) + # Truncate to max length + return tool_id[:TOOL_CALL_ID_MAX_LEN] + + def assistant_function_to_tool(assistant_message: dict) -> dict: assert "function_call" in assistant_message new_msg = copy.deepcopy(assistant_message) @@ -1365,7 +1383,6 @@ def fire_and_forget(coro, task_name: Optional[str] = None, error_callback: Optio Returns: The created asyncio Task object """ - import traceback task = asyncio.create_task(coro) diff --git a/pyproject.toml b/pyproject.toml index a6119383..6a5a02d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "letta" -version = "0.16.4" +version = "0.16.5" description = "Create LLM agents with long-term memory and custom tools" authors = [ {name = "Letta Team", email = "contact@letta.com"}, @@ -66,7 +66,8 @@ dependencies = [ "certifi>=2025.6.15", "markitdown[docx,pdf,pptx]>=0.1.2", "orjson>=3.11.1", - "ruff[dev]>=0.12.10", + "ruff>=0.12.10", + "ty>=0.0.17", "trafilatura", "readability-lxml", "google-genai>=1.52.0", @@ -77,6 +78,7 @@ dependencies = [ "clickhouse-connect>=0.10.0", "aiofiles>=24.1.0", "async-lru>=2.0.5", + ] [project.scripts] @@ -176,23 +178,41 @@ extend-exclude = [ [tool.ruff.lint] select = [ - "E", # pycodestyle errors - "W", # pycodestyle warnings - "F", # pyflakes - "I", # isort + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "RUF", # ruff + "FAST", # fastapi ] ignore = [ - "E501", # line too long (handled by formatter) - "E402", # module import not at top of file - "E711", # none-comparison - "E712", # true-false-comparison - "E722", # bare except - "E721", # type comparison - "F401", # unused import - "F821", # undefined name - "F811", # redefined while unused - "F841", # local variable assigned but never used - "W293", # blank line contains whitespace + "E402", # module level import not at 
top of file + "E501", # line too long (handled by formatter) + "E711", # comparison to None (SQLAlchemy requires ==) + "E712", # comparison to True/False (SQLAlchemy requires ==) + "FAST002", # FastAPI dependency without Annotated (large migration) + "RUF001", # ambiguous unicode character in string + "RUF002", # ambiguous unicode character in docstring + "RUF003", # ambiguous unicode character in comment + "RUF010", # explicit conversion flag in f-string +] + +[tool.ty.rules] +all = "ignore" +unresolved-reference = "error" +unresolved-import = "ignore" + +[tool.ty.src] +exclude = ["examples/", "tests/data/"] + +[tool.ty.analysis] +allowed-unresolved-imports = [ + "letta_client.**", + "cowsay", + "transformers", + "core.utils", + "core.menu", + "some_nonexistent_package", ] [tool.ruff.lint.isort] diff --git a/sandbox/modal_executor.py b/sandbox/modal_executor.py index 2b759967..3c453fc0 100644 --- a/sandbox/modal_executor.py +++ b/sandbox/modal_executor.py @@ -225,7 +225,7 @@ def setup_signal_handlers(): # Enable fault handler with file output try: faulthandler.enable(file=sys.stderr, all_threads=True) - except: + except Exception: pass # Faulthandler might not be available # Set resource limits to prevent runaway processes @@ -234,7 +234,7 @@ def setup_signal_handlers(): resource.setrlimit(resource.RLIMIT_AS, (1024 * 1024 * 1024, 1024 * 1024 * 1024)) # Limit stack size to 8MB (default is often unlimited) resource.setrlimit(resource.RLIMIT_STACK, (8 * 1024 * 1024, 8 * 1024 * 1024)) - except: + except Exception: pass # Resource limits might not be available # Set environment variables diff --git a/tests/adapters/test_letta_llm_stream_adapter_error_handling.py b/tests/adapters/test_letta_llm_stream_adapter_error_handling.py index 3241ce7e..a7d7e7e5 100644 --- a/tests/adapters/test_letta_llm_stream_adapter_error_handling.py +++ b/tests/adapters/test_letta_llm_stream_adapter_error_handling.py @@ -1,10 +1,20 @@ import anthropic import httpx +import openai import pytest 
+from google.genai import errors as google_errors from letta.adapters.letta_llm_stream_adapter import LettaLLMStreamAdapter -from letta.errors import ContextWindowExceededError, LLMConnectionError, LLMServerError +from letta.errors import ( + ContextWindowExceededError, + LLMBadRequestError, + LLMConnectionError, + LLMInsufficientCreditsError, + LLMServerError, +) from letta.llm_api.anthropic_client import AnthropicClient +from letta.llm_api.google_vertex_client import GoogleVertexClient +from letta.schemas.enums import LLMCallType from letta.schemas.llm_config import LLMConfig @@ -42,7 +52,7 @@ async def test_letta_llm_stream_adapter_converts_anthropic_streaming_api_status_ llm_client = AnthropicClient() llm_config = LLMConfig(model="claude-sonnet-4-5-20250929", model_endpoint_type="anthropic", context_window=200000) - adapter = LettaLLMStreamAdapter(llm_client=llm_client, llm_config=llm_config) + adapter = LettaLLMStreamAdapter(llm_client=llm_client, llm_config=llm_config, call_type=LLMCallType.agent_step) gen = adapter.invoke_llm(request_data={}, messages=[], tools=[], use_assistant_message=True) with pytest.raises(LLMServerError): @@ -83,7 +93,7 @@ async def test_letta_llm_stream_adapter_converts_anthropic_413_request_too_large llm_client = AnthropicClient() llm_config = LLMConfig(model="claude-sonnet-4-5-20250929", model_endpoint_type="anthropic", context_window=200000) - adapter = LettaLLMStreamAdapter(llm_client=llm_client, llm_config=llm_config) + adapter = LettaLLMStreamAdapter(llm_client=llm_client, llm_config=llm_config, call_type=LLMCallType.agent_step) gen = adapter.invoke_llm(request_data={}, messages=[], tools=[], use_assistant_message=True) with pytest.raises(ContextWindowExceededError): @@ -117,7 +127,7 @@ async def test_letta_llm_stream_adapter_converts_httpx_read_error(monkeypatch): llm_client = AnthropicClient() llm_config = LLMConfig(model="claude-sonnet-4-5-20250929", model_endpoint_type="anthropic", context_window=200000) - adapter = 
LettaLLMStreamAdapter(llm_client=llm_client, llm_config=llm_config) + adapter = LettaLLMStreamAdapter(llm_client=llm_client, llm_config=llm_config, call_type=LLMCallType.agent_step) gen = adapter.invoke_llm(request_data={}, messages=[], tools=[], use_assistant_message=True) with pytest.raises(LLMConnectionError): @@ -151,7 +161,7 @@ async def test_letta_llm_stream_adapter_converts_httpx_write_error(monkeypatch): llm_client = AnthropicClient() llm_config = LLMConfig(model="claude-sonnet-4-5-20250929", model_endpoint_type="anthropic", context_window=200000) - adapter = LettaLLMStreamAdapter(llm_client=llm_client, llm_config=llm_config) + adapter = LettaLLMStreamAdapter(llm_client=llm_client, llm_config=llm_config, call_type=LLMCallType.agent_step) gen = adapter.invoke_llm(request_data={}, messages=[], tools=[], use_assistant_message=True) with pytest.raises(LLMConnectionError): @@ -187,3 +197,93 @@ def test_anthropic_client_handle_llm_error_request_too_large_string(): assert isinstance(result, ContextWindowExceededError) assert "request_too_large" in result.message.lower() or "context window exceeded" in result.message.lower() + + +@pytest.mark.parametrize( + "error_message", + [ + "The input token count exceeds the maximum number of tokens allowed 1048576.", + "Token count of 1500000 exceeds the model limit of 1048576 tokens allowed.", + ], + ids=["gemini-token-count-exceeds", "gemini-tokens-allowed-limit"], +) +def test_google_client_handle_llm_error_token_limit_returns_context_window_exceeded(error_message): + """Google 400 errors about token limits should map to ContextWindowExceededError.""" + client = GoogleVertexClient.__new__(GoogleVertexClient) + response_json = { + "message": f'{{"error": {{"code": 400, "message": "{error_message}", "status": "INVALID_ARGUMENT"}}}}', + "status": "Bad Request", + } + error = google_errors.ClientError(400, response_json) + result = client.handle_llm_error(error) + assert isinstance(result, ContextWindowExceededError) + + +def 
test_google_client_handle_llm_error_context_exceeded_returns_context_window_exceeded(): + """Google 400 errors with 'context' + 'exceeded' should map to ContextWindowExceededError.""" + client = GoogleVertexClient.__new__(GoogleVertexClient) + response_json = { + "message": '{"error": {"code": 400, "message": "Request context window exceeded the limit.", "status": "INVALID_ARGUMENT"}}', + "status": "Bad Request", + } + error = google_errors.ClientError(400, response_json) + result = client.handle_llm_error(error) + assert isinstance(result, ContextWindowExceededError) + + +def test_google_client_handle_llm_error_generic_400_returns_bad_request(): + """Google 400 errors without token/context keywords should map to LLMBadRequestError.""" + client = GoogleVertexClient.__new__(GoogleVertexClient) + response_json = { + "message": '{"error": {"code": 400, "message": "Invalid argument: unsupported parameter.", "status": "INVALID_ARGUMENT"}}', + "status": "Bad Request", + } + error = google_errors.ClientError(400, response_json) + result = client.handle_llm_error(error) + assert isinstance(result, LLMBadRequestError) + assert not isinstance(result, ContextWindowExceededError) + + +@pytest.mark.parametrize( + "error_message", + [ + "Insufficient credits. Add more using https://openrouter.ai/settings/credits", + "This request requires more credits, or fewer max_tokens. 
You requested up to 65536 tokens, but can only afford 2679.", + "You exceeded your current quota, please check your plan and billing details.", + ], + ids=["openrouter-402", "openrouter-streaming-afford", "openai-quota-exceeded"], +) +def test_openai_client_handle_llm_error_insufficient_credits(error_message): + """Credit/quota errors should map to LLMInsufficientCreditsError.""" + from letta.llm_api.openai_client import OpenAIClient + + client = OpenAIClient() + request = httpx.Request("POST", "https://api.openai.com/v1/chat/completions") + error = openai.APIError(message=error_message, request=request, body=None) + result = client.handle_llm_error(error) + assert isinstance(result, LLMInsufficientCreditsError) + + +def test_openai_client_handle_llm_error_402_status_code(): + """402 APIStatusError should map to LLMInsufficientCreditsError.""" + from letta.llm_api.openai_client import OpenAIClient + + client = OpenAIClient() + request = httpx.Request("POST", "https://openrouter.ai/api/v1/chat/completions") + response = httpx.Response(status_code=402, request=request) + body = {"error": {"message": "Insufficient credits", "code": 402}} + error = openai.APIStatusError("Insufficient credits", response=response, body=body) + result = client.handle_llm_error(error) + assert isinstance(result, LLMInsufficientCreditsError) + + +def test_openai_client_handle_llm_error_non_credit_api_error(): + """Non-credit bare APIError should map to LLMBadRequestError, not LLMInsufficientCreditsError.""" + from letta.llm_api.openai_client import OpenAIClient + + client = OpenAIClient() + request = httpx.Request("POST", "https://api.openai.com/v1/chat/completions") + error = openai.APIError(message="Some other API error occurred", request=request, body=None) + result = client.handle_llm_error(error) + assert isinstance(result, LLMBadRequestError) + assert not isinstance(result, LLMInsufficientCreditsError) diff --git a/tests/configs/llm_model_configs/claude-3-5-haiku.json 
b/tests/configs/llm_model_configs/claude-3-5-haiku.json index 89f4e0c5..bdda9f62 100644 --- a/tests/configs/llm_model_configs/claude-3-5-haiku.json +++ b/tests/configs/llm_model_configs/claude-3-5-haiku.json @@ -1,6 +1,6 @@ { "context_window": 200000, - "model": "claude-3-5-haiku-20241022", + "model": "claude-haiku-4-5-20251001", "model_endpoint_type": "anthropic", "model_endpoint": "https://api.anthropic.com/v1", "model_wrapper": null, diff --git a/tests/conftest.py b/tests/conftest.py index fada6cb4..8bb49443 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,7 +14,6 @@ from letta_client import Letta from letta.server.db import db_registry from letta.services.organization_manager import OrganizationManager from letta.services.user_manager import UserManager -from letta.settings import tool_settings def pytest_configure(config): @@ -67,15 +66,6 @@ def client(server_url: str) -> Letta: yield client_instance -@pytest.fixture(scope="session", autouse=True) -def disable_db_pooling_for_tests(): - """Disable database connection pooling for the entire test session.""" - os.environ["LETTA_DISABLE_SQLALCHEMY_POOLING"] = "true" - yield - if "LETTA_DISABLE_SQLALCHEMY_POOLING" in os.environ: - del os.environ["LETTA_DISABLE_SQLALCHEMY_POOLING"] - - @pytest.fixture(autouse=True) async def cleanup_db_connections(): """Cleanup database connections after each test.""" diff --git a/tests/helpers/client_helper.py b/tests/helpers/client_helper.py index 99740d54..8a45b208 100644 --- a/tests/helpers/client_helper.py +++ b/tests/helpers/client_helper.py @@ -1,6 +1,6 @@ import time -from letta import RESTClient +from letta import RESTClient # type: ignore[attr-defined] from letta.schemas.enums import JobStatus from letta.schemas.job import Job from letta.schemas.source import Source diff --git a/tests/helpers/endpoints_helper.py b/tests/helpers/endpoints_helper.py index b169427c..a9a5ab70 100644 --- a/tests/helpers/endpoints_helper.py +++ b/tests/helpers/endpoints_helper.py @@ 
-142,7 +142,7 @@ def assert_invoked_send_message_with_keyword(messages: Sequence[LettaMessage], k send_message_function_call = target_message.tool_call try: arguments = json.loads(send_message_function_call.arguments) - except: + except Exception: raise InvalidToolCallError(messages=[target_message], explanation="Function call arguments could not be loaded into JSON") # Message field not in send_message diff --git a/tests/helpers/utils.py b/tests/helpers/utils.py index 52b13bc7..17715b91 100644 --- a/tests/helpers/utils.py +++ b/tests/helpers/utils.py @@ -186,6 +186,9 @@ def validate_context_window_overview( # 2. All token counts should be non-negative assert overview.num_tokens_system >= 0, "System token count cannot be negative" assert overview.num_tokens_core_memory >= 0, "Core memory token count cannot be negative" + assert overview.num_tokens_memory_filesystem >= 0, "Memory filesystem token count cannot be negative" + assert overview.num_tokens_tool_usage_rules >= 0, "Tool usage rules token count cannot be negative" + assert overview.num_tokens_directories >= 0, "Directories token count cannot be negative" assert overview.num_tokens_external_memory_summary >= 0, "External memory summary token count cannot be negative" assert overview.num_tokens_summary_memory >= 0, "Summary memory token count cannot be negative" assert overview.num_tokens_messages >= 0, "Messages token count cannot be negative" @@ -195,6 +198,9 @@ def validate_context_window_overview( expected_total = ( overview.num_tokens_system + overview.num_tokens_core_memory + + overview.num_tokens_memory_filesystem + + overview.num_tokens_tool_usage_rules + + overview.num_tokens_directories + overview.num_tokens_external_memory_summary + overview.num_tokens_summary_memory + overview.num_tokens_messages @@ -244,13 +250,14 @@ def validate_context_window_overview( avg_tokens_per_message = overview.num_tokens_messages / overview.num_messages assert avg_tokens_per_message >= 0, "Average tokens per message 
should be non-negative" - # 16. Check attached file is visible + # 16. Check attached file is visible in the directories section if attached_file: - assert attached_file.visible_content in overview.core_memory, "File must be attached in core memory" - assert '" in overview.core_memory - assert "max_files_open" in overview.core_memory, "Max files should be set in core memory" - assert "current_files_open" in overview.core_memory, "Current files should be set in core memory" + assert overview.directories is not None, "Directories section must exist when files are attached" + assert attached_file.visible_content in overview.directories, "File must be attached in directories" + assert '" in overview.directories + assert "max_files_open" in overview.directories, "Max files should be set in directories" + assert "current_files_open" in overview.directories, "Current files should be set in directories" # Check for tools assert overview.num_tokens_functions_definitions > 0 diff --git a/tests/integration_test_agent_tool_graph.py b/tests/integration_test_agent_tool_graph.py index 9a25d8d2..89a5f840 100644 --- a/tests/integration_test_agent_tool_graph.py +++ b/tests/integration_test_agent_tool_graph.py @@ -4,11 +4,9 @@ import uuid import pytest -from letta.agents.letta_agent_v2 import LettaAgentV2 from letta.agents.letta_agent_v3 import LettaAgentV3 from letta.config import LettaConfig from letta.schemas.letta_message import ToolCallMessage -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType from letta.schemas.message import MessageCreate from letta.schemas.run import Run from letta.schemas.tool_rule import ( @@ -282,7 +280,7 @@ async def complex_child_tool(server): Returns: str: Summary string encoding the provided inputs. 
""" - return f"ok:{text}:{num}:{flag}:{len(arr)}:{len(obj)}" + return f"ok:{text}:{num}:{flag}:{len(arr)}" actor = await server.user_manager.get_actor_or_default_async() tool = await server.tool_manager.create_or_update_tool_async(create_tool_from_func(func=complex_child), actor=actor) diff --git a/tests/integration_test_async_tool_sandbox.py b/tests/integration_test_async_tool_sandbox.py index de690f12..2e3cd9f8 100644 --- a/tests/integration_test_async_tool_sandbox.py +++ b/tests/integration_test_async_tool_sandbox.py @@ -13,7 +13,6 @@ from dotenv import load_dotenv from letta_client import Letta from sqlalchemy import delete -from letta.config import LettaConfig from letta.functions.function_sets.base import core_memory_append, core_memory_replace from letta.orm.sandbox_config import SandboxConfig, SandboxEnvironmentVariable from letta.schemas.agent import AgentState, CreateAgent @@ -23,7 +22,6 @@ from letta.schemas.organization import Organization from letta.schemas.pip_requirement import PipRequirement from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate from letta.schemas.user import User -from letta.server.db import db_registry from letta.services.organization_manager import OrganizationManager from letta.services.sandbox_config_manager import SandboxConfigManager from letta.services.tool_manager import ToolManager @@ -382,7 +380,7 @@ async def tool_with_broken_pip_requirements(test_user): str: Should not reach here due to pip install failure. 
""" try: - import some_nonexistent_package # This will fail during pip install + import some_nonexistent_package # noqa: F401 return "This should not execute" except ImportError as e: diff --git a/tests/integration_test_builtin_tools.py b/tests/integration_test_builtin_tools.py index 9460b8a6..fad8b917 100644 --- a/tests/integration_test_builtin_tools.py +++ b/tests/integration_test_builtin_tools.py @@ -234,9 +234,9 @@ async def test_web_search() -> None: # Check for education-related information in summary and highlights result_text = "" - if "summary" in result and result["summary"]: + if result.get("summary"): result_text += " " + result["summary"].lower() - if "highlights" in result and result["highlights"]: + if result.get("highlights"): for highlight in result["highlights"]: result_text += " " + highlight.lower() diff --git a/tests/integration_test_chat_completions.py b/tests/integration_test_chat_completions.py index 89669350..86bae323 100644 --- a/tests/integration_test_chat_completions.py +++ b/tests/integration_test_chat_completions.py @@ -1,7 +1,6 @@ import os import threading import uuid -from typing import List import pytest from dotenv import load_dotenv @@ -12,7 +11,6 @@ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import AgentType, MessageStreamStatus from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import MessageCreate from letta.schemas.openai.chat_completion_request import ChatCompletionRequest, UserMessage as OpenAIUserMessage from letta.schemas.usage import LettaUsageStatistics from tests.utils import wait_for_server diff --git a/tests/integration_test_clickhouse_llm_traces.py b/tests/integration_test_clickhouse_llm_traces.py new file mode 100644 index 00000000..7d41a26c --- /dev/null +++ b/tests/integration_test_clickhouse_llm_traces.py @@ -0,0 +1,350 @@ +""" +Integration tests for ClickHouse-backed LLM raw 
traces. + +Validates that: +1) Agent message requests are stored in ClickHouse (request_json contains the message) +2) Summarization traces are stored and retrievable by step_id +3) Error traces are stored with is_error, error_type, and error_message +4) llm_config_json is properly stored +5) Cache and usage statistics are stored (cached_input_tokens, cache_write_tokens, reasoning_tokens) +""" + +import asyncio +import json +import os +import time +import uuid + +import pytest + +from letta.agents.letta_agent_v3 import LettaAgentV3 +from letta.config import LettaConfig +from letta.schemas.agent import CreateAgent +from letta.schemas.embedding_config import EmbeddingConfig +from letta.schemas.enums import MessageRole +from letta.schemas.letta_message_content import TextContent +from letta.schemas.llm_config import LLMConfig +from letta.schemas.message import Message, MessageCreate +from letta.schemas.run import Run +from letta.server.server import SyncServer +from letta.services.llm_trace_reader import get_llm_trace_reader +from letta.services.provider_trace_backends import get_provider_trace_backends +from letta.services.summarizer.summarizer import simple_summary +from letta.settings import settings, telemetry_settings + + +def _require_clickhouse_env() -> dict[str, str]: + endpoint = os.getenv("CLICKHOUSE_ENDPOINT") + password = os.getenv("CLICKHOUSE_PASSWORD") + if not endpoint or not password: + pytest.skip("ClickHouse env vars not set (CLICKHOUSE_ENDPOINT, CLICKHOUSE_PASSWORD)") + return { + "endpoint": endpoint, + "password": password, + "username": os.getenv("CLICKHOUSE_USERNAME", "default"), + "database": os.getenv("CLICKHOUSE_DATABASE", "otel"), + } + + +def _anthropic_llm_config() -> LLMConfig: + return LLMConfig( + model="claude-haiku-4-5-20251001", + model_endpoint_type="anthropic", + model_endpoint="https://api.anthropic.com/v1", + context_window=200000, + max_tokens=2048, + put_inner_thoughts_in_kwargs=False, + enable_reasoner=False, + ) + + 
+@pytest.fixture +async def server(): + config = LettaConfig.load() + config.save() + server = SyncServer(init_with_default_org_and_user=True) + await server.init_async() + await server.tool_manager.upsert_base_tools_async(actor=server.default_user) + yield server + + +@pytest.fixture +async def actor(server: SyncServer): + return server.default_user + + +@pytest.fixture +def clickhouse_settings(): + env = _require_clickhouse_env() + + original_values = { + "endpoint": settings.clickhouse_endpoint, + "username": settings.clickhouse_username, + "password": settings.clickhouse_password, + "database": settings.clickhouse_database, + "store_llm_traces": settings.store_llm_traces, + "provider_trace_backend": telemetry_settings.provider_trace_backend, + } + + settings.clickhouse_endpoint = env["endpoint"] + settings.clickhouse_username = env["username"] + settings.clickhouse_password = env["password"] + settings.clickhouse_database = env["database"] + settings.store_llm_traces = True + + # Configure telemetry to use clickhouse backend (set the underlying field, not the property) + telemetry_settings.provider_trace_backend = "clickhouse" + # Clear the cached backends so they get recreated with new settings + get_provider_trace_backends.cache_clear() + + yield + + settings.clickhouse_endpoint = original_values["endpoint"] + settings.clickhouse_username = original_values["username"] + settings.clickhouse_password = original_values["password"] + settings.clickhouse_database = original_values["database"] + settings.store_llm_traces = original_values["store_llm_traces"] + telemetry_settings.provider_trace_backend = original_values["provider_trace_backend"] + # Clear cache again to restore original backends + get_provider_trace_backends.cache_clear() + + +async def _wait_for_raw_trace(step_id: str, organization_id: str, timeout_seconds: int = 30): + """Wait for a trace to appear in ClickHouse. 
+ + With async_insert + wait_for_async_insert=1, traces should appear quickly, + but we poll to handle any propagation delay. + """ + reader = get_llm_trace_reader() + deadline = time.time() + timeout_seconds + + while time.time() < deadline: + trace = await reader.get_by_step_id_async(step_id=step_id, organization_id=organization_id) + if trace is not None: + return trace + await asyncio.sleep(0.5) + + raise AssertionError(f"Timed out waiting for raw trace with step_id={step_id}") + + +@pytest.mark.asyncio +async def test_agent_message_stored_in_clickhouse(server: SyncServer, actor, clickhouse_settings): + """Test that agent step traces are stored with all fields including llm_config_json.""" + message_text = f"ClickHouse trace test {uuid.uuid4()}" + llm_config = _anthropic_llm_config() + + agent_state = await server.agent_manager.create_agent_async( + CreateAgent( + name=f"clickhouse_agent_{uuid.uuid4().hex[:8]}", + llm_config=llm_config, + embedding_config=EmbeddingConfig.default_config(model_name="letta"), + ), + actor=actor, + ) + + agent = LettaAgentV3(agent_state=agent_state, actor=actor) + run = await server.run_manager.create_run( + Run(agent_id=agent_state.id), + actor=actor, + ) + run_id = run.id + response = await agent.step( + [MessageCreate(role=MessageRole.user, content=[TextContent(text=message_text)])], + run_id=run_id, + ) + + step_id = next(msg.step_id for msg in reversed(response.messages) if msg.step_id) + trace = await _wait_for_raw_trace(step_id=step_id, organization_id=actor.organization_id) + + # Basic trace fields + assert trace.step_id == step_id + assert message_text in trace.request_json + assert trace.is_error is False + assert trace.error_type is None + assert trace.error_message is None + + # Verify llm_config_json is stored and contains expected fields + assert trace.llm_config_json, "llm_config_json should not be empty" + config_data = json.loads(trace.llm_config_json) + assert config_data.get("model") == llm_config.model + assert 
"context_window" in config_data + assert "max_tokens" in config_data + + # Token usage should be populated + assert trace.prompt_tokens > 0 + assert trace.completion_tokens >= 0 + assert trace.total_tokens > 0 + + +@pytest.mark.asyncio +async def test_summary_stored_with_content_and_usage(server: SyncServer, actor, clickhouse_settings): + """Test that summarization traces are stored with content, usage, and cache info.""" + step_id = f"step-{uuid.uuid4()}" + llm_config = _anthropic_llm_config() + summary_source_messages = [ + Message(role=MessageRole.system, content=[TextContent(text="System prompt")]), + Message(role=MessageRole.user, content=[TextContent(text="User message 1")]), + Message(role=MessageRole.assistant, content=[TextContent(text="Assistant response 1")]), + Message(role=MessageRole.user, content=[TextContent(text="User message 2")]), + ] + + summary_text = await simple_summary( + messages=summary_source_messages, + llm_config=llm_config, + actor=actor, + agent_id=f"agent-{uuid.uuid4()}", + agent_tags=["test", "clickhouse"], + run_id=f"run-{uuid.uuid4()}", + step_id=step_id, + compaction_settings={"mode": "partial_evict", "message_buffer_limit": 60}, + ) + + trace = await _wait_for_raw_trace(step_id=step_id, organization_id=actor.organization_id) + + # Basic assertions + assert trace.step_id == step_id + assert trace.call_type == "summarization" + assert trace.is_error is False + + # Verify llm_config_json is stored + assert trace.llm_config_json, "llm_config_json should not be empty" + config_data = json.loads(trace.llm_config_json) + assert config_data.get("model") == llm_config.model + + # Verify summary content in response + summary_in_response = False + try: + response_payload = json.loads(trace.response_json) + if isinstance(response_payload, dict): + if "choices" in response_payload: + content = response_payload.get("choices", [{}])[0].get("message", {}).get("content", "") + summary_in_response = summary_text.strip() in (content or "") + elif 
"content" in response_payload: + summary_in_response = summary_text.strip() in (response_payload.get("content") or "") + except Exception: + summary_in_response = False + + assert summary_in_response or summary_text in trace.response_json + + # Token usage should be populated + assert trace.prompt_tokens > 0 + assert trace.total_tokens > 0 + + # Cache fields may or may not be populated depending on provider response + # Just verify they're accessible (not erroring) + _ = trace.cached_input_tokens + _ = trace.cache_write_tokens + _ = trace.reasoning_tokens + + +@pytest.mark.asyncio +async def test_error_trace_stored_in_clickhouse(server: SyncServer, actor, clickhouse_settings): + """Test that error traces are stored with is_error=True and error details.""" + from letta.llm_api.anthropic_client import AnthropicClient + + step_id = f"step-error-{uuid.uuid4()}" + + # Create a client with invalid config to trigger an error + invalid_llm_config = LLMConfig( + model="invalid-model-that-does-not-exist", + model_endpoint_type="anthropic", + model_endpoint="https://api.anthropic.com/v1", + context_window=200000, + max_tokens=2048, + ) + + from letta.services.telemetry_manager import TelemetryManager + + client = AnthropicClient() + client.set_telemetry_context( + telemetry_manager=TelemetryManager(), + agent_id=f"agent-{uuid.uuid4()}", + run_id=f"run-{uuid.uuid4()}", + step_id=step_id, + call_type="agent_step", + org_id=actor.organization_id, + ) + client.actor = actor + + # Make a request that will fail + request_data = { + "model": invalid_llm_config.model, + "messages": [{"role": "user", "content": "test"}], + "max_tokens": 100, + } + + try: + await client.request_async_with_telemetry(request_data, invalid_llm_config) + except Exception: + pass # Expected to fail + + # Wait for the error trace to be written + trace = await _wait_for_raw_trace(step_id=step_id, organization_id=actor.organization_id) + + # Verify error fields + assert trace.step_id == step_id + assert 
trace.is_error is True + assert trace.error_type is not None, "error_type should be set for error traces" + assert trace.error_message is not None, "error_message should be set for error traces" + + # Verify llm_config_json is still stored even for errors + assert trace.llm_config_json, "llm_config_json should be stored even for error traces" + config_data = json.loads(trace.llm_config_json) + assert config_data.get("model") == invalid_llm_config.model + + +@pytest.mark.asyncio +async def test_cache_tokens_stored_for_anthropic(server: SyncServer, actor, clickhouse_settings): + """Test that Anthropic cache tokens (cached_input_tokens, cache_write_tokens) are stored. + + Note: This test verifies the fields are properly stored when present in the response. + Actual cache token values depend on Anthropic's prompt caching behavior. + """ + message_text = f"Cache test {uuid.uuid4()}" + llm_config = _anthropic_llm_config() + + agent_state = await server.agent_manager.create_agent_async( + CreateAgent( + name=f"cache_test_agent_{uuid.uuid4().hex[:8]}", + llm_config=llm_config, + embedding_config=EmbeddingConfig.default_config(model_name="letta"), + ), + actor=actor, + ) + + agent = LettaAgentV3(agent_state=agent_state, actor=actor) + run = await server.run_manager.create_run( + Run(agent_id=agent_state.id), + actor=actor, + ) + + # Make two requests - second may benefit from caching + response1 = await agent.step( + [MessageCreate(role=MessageRole.user, content=[TextContent(text=message_text)])], + run_id=run.id, + ) + step_id_1 = next(msg.step_id for msg in reversed(response1.messages) if msg.step_id) + + response2 = await agent.step( + [MessageCreate(role=MessageRole.user, content=[TextContent(text="Follow up question")])], + run_id=run.id, + ) + step_id_2 = next(msg.step_id for msg in reversed(response2.messages) if msg.step_id) + + # Check traces for both requests + trace1 = await _wait_for_raw_trace(step_id=step_id_1, organization_id=actor.organization_id) + trace2 = 
await _wait_for_raw_trace(step_id=step_id_2, organization_id=actor.organization_id)
+
+    # Verify cache fields are accessible (may be None if no caching occurred)
+    # The important thing is they're stored correctly when present
+    for trace in [trace1, trace2]:
+        assert trace.prompt_tokens > 0
+        # Cache fields should be stored (may be None or int)
+        assert trace.cached_input_tokens is None or isinstance(trace.cached_input_tokens, int)
+        assert trace.cache_write_tokens is None or isinstance(trace.cache_write_tokens, int)
+        assert trace.reasoning_tokens is None or isinstance(trace.reasoning_tokens, int)
+
+    # Verify llm_config_json
+    assert trace.llm_config_json
+    config_data = json.loads(trace.llm_config_json)
+    assert config_data.get("model") == llm_config.model
diff --git a/tests/integration_test_client_side_tools.py b/tests/integration_test_client_side_tools.py
index f9339bc7..b7027c4a 100644
--- a/tests/integration_test_client_side_tools.py
+++ b/tests/integration_test_client_side_tools.py
@@ -318,7 +318,6 @@ def get_secret_code(input_text: str) -> str:
     print("   ✓ Without client_tools, server tool executed directly (no approval required)")
 
     # The response should eventually contain the server value
-    all_content = " ".join([msg.content for msg in response4.messages if hasattr(msg, "content") and msg.content])
     tool_returns = [msg for msg in response4.messages if msg.message_type == "tool_return_message"]
     if tool_returns:
         server_return_value = tool_returns[0].tool_return
diff --git a/tests/integration_test_conversations_sdk.py b/tests/integration_test_conversations_sdk.py
index 7899bf44..129e0ecf 100644
--- a/tests/integration_test_conversations_sdk.py
+++ b/tests/integration_test_conversations_sdk.py
@@ -3,7 +3,6 @@ Integration tests for the Conversations API using the SDK.
""" import uuid -from time import sleep import pytest import requests @@ -568,6 +567,94 @@ class TestConversationsSDK: assert first_message_id not in [m.id for m in messages_after] +class TestConversationDelete: + """Tests for the conversation delete endpoint.""" + + def test_delete_conversation(self, client: Letta, agent, server_url: str): + """Test soft deleting a conversation.""" + # Create a conversation + conversation = client.conversations.create(agent_id=agent.id) + assert conversation.id is not None + + # Delete it via REST endpoint + response = requests.delete( + f"{server_url}/v1/conversations/{conversation.id}", + ) + assert response.status_code == 200, f"Expected 200, got {response.status_code}: {response.text}" + + # Verify it's no longer accessible + response = requests.get( + f"{server_url}/v1/conversations/{conversation.id}", + ) + assert response.status_code == 404, f"Expected 404 for deleted conversation, got {response.status_code}" + + def test_delete_conversation_removes_from_list(self, client: Letta, agent, server_url: str): + """Test that deleted conversations don't appear in list.""" + # Create two conversations + conv1 = client.conversations.create(agent_id=agent.id) + conv2 = client.conversations.create(agent_id=agent.id) + + # Verify both appear in list + conversations = client.conversations.list(agent_id=agent.id) + conv_ids = [c.id for c in conversations] + assert conv1.id in conv_ids + assert conv2.id in conv_ids + + # Delete one + response = requests.delete( + f"{server_url}/v1/conversations/{conv1.id}", + ) + assert response.status_code == 200 + + # Verify only the non-deleted one appears in list + conversations = client.conversations.list(agent_id=agent.id) + conv_ids = [c.id for c in conversations] + assert conv1.id not in conv_ids, "Deleted conversation should not appear in list" + assert conv2.id in conv_ids, "Non-deleted conversation should still appear" + + def test_delete_conversation_not_found(self, client: Letta, agent, 
server_url: str): + """Test that deleting a non-existent conversation returns 404.""" + fake_id = "conv-00000000-0000-0000-0000-000000000000" + response = requests.delete( + f"{server_url}/v1/conversations/{fake_id}", + ) + assert response.status_code == 404 + + def test_delete_conversation_double_delete(self, client: Letta, agent, server_url: str): + """Test that deleting an already-deleted conversation returns 404.""" + # Create and delete a conversation + conversation = client.conversations.create(agent_id=agent.id) + + # First delete should succeed + response = requests.delete( + f"{server_url}/v1/conversations/{conversation.id}", + ) + assert response.status_code == 200 + + # Second delete should return 404 + response = requests.delete( + f"{server_url}/v1/conversations/{conversation.id}", + ) + assert response.status_code == 404, "Double delete should return 404" + + def test_update_deleted_conversation_fails(self, client: Letta, agent, server_url: str): + """Test that updating a deleted conversation returns 404.""" + # Create and delete a conversation + conversation = client.conversations.create(agent_id=agent.id) + + response = requests.delete( + f"{server_url}/v1/conversations/{conversation.id}", + ) + assert response.status_code == 200 + + # Try to update the deleted conversation + response = requests.patch( + f"{server_url}/v1/conversations/{conversation.id}", + json={"summary": "Updated summary"}, + ) + assert response.status_code == 404, "Updating deleted conversation should return 404" + + class TestConversationCompact: """Tests for the conversation compact (summarization) endpoint.""" @@ -617,6 +704,45 @@ class TestConversationCompact: ) assert len(compacted_messages) < initial_count + def test_compact_conversation_creates_summary_role_message(self, client: Letta, agent, server_url: str): + """Test that compaction creates a summary message with role='summary'.""" + # Create a conversation + conversation = 
client.conversations.create(agent_id=agent.id) + + # Send multiple messages to create a history worth summarizing + for i in range(5): + list( + client.conversations.messages.create( + conversation_id=conversation.id, + messages=[{"role": "user", "content": f"Message {i}: Tell me about topic {i}."}], + ) + ) + + # Call compact endpoint with 'all' mode to ensure a single summary + response = requests.post( + f"{server_url}/v1/conversations/{conversation.id}/compact", + json={ + "compaction_settings": { + "mode": "all", + } + }, + ) + assert response.status_code == 200, f"Expected 200, got {response.status_code}: {response.text}" + + # Get compacted messages + compacted_messages = client.conversations.messages.list( + conversation_id=conversation.id, + order="asc", + ) + + # After 'all' mode compaction, we expect: system message + summary message + # The summary message should have role='summary' + summary_messages = [msg for msg in compacted_messages if msg.role == "summary"] + assert len(summary_messages) == 1, ( + f"Expected exactly 1 summary message after compaction, found {len(summary_messages)}. 
" + f"Message roles: {[msg.role for msg in compacted_messages]}" + ) + def test_compact_conversation_with_settings(self, client: Letta, agent, server_url: str): """Test conversation compaction with custom compaction settings.""" # Create a conversation with multiple messages diff --git a/tests/integration_test_human_in_the_loop.py b/tests/integration_test_human_in_the_loop.py index 6024660f..fab4cc14 100644 --- a/tests/integration_test_human_in_the_loop.py +++ b/tests/integration_test_human_in_the_loop.py @@ -1,4 +1,3 @@ -import asyncio import logging import uuid from typing import Any, List @@ -7,7 +6,6 @@ from unittest.mock import patch import pytest from letta_client import APIError, Letta from letta_client.types import AgentState, MessageCreateParam, Tool -from letta_client.types.agents import ApprovalCreateParam from letta.adapters.simple_llm_stream_adapter import SimpleLLMStreamAdapter @@ -364,7 +362,7 @@ def test_invoke_tool_after_turning_off_requires_approval( try: assert messages[idx].message_type == "assistant_message" idx += 1 - except: + except Exception: pass assert messages[idx].message_type == "tool_call_message" @@ -377,7 +375,7 @@ def test_invoke_tool_after_turning_off_requires_approval( try: assert messages[idx].message_type == "assistant_message" idx += 1 - except: + except Exception: assert messages[idx].message_type == "tool_call_message" idx += 1 assert messages[idx].message_type == "tool_return_message" @@ -1326,7 +1324,7 @@ def test_agent_records_last_stop_reason_after_approval_flow( assert agent_after_approval.last_stop_reason != initial_stop_reason # Should be different from initial # Send follow-up message to complete the flow - response2 = client.agents.messages.create( + client.agents.messages.create( agent_id=agent.id, messages=USER_MESSAGE_FOLLOW_UP, ) @@ -1447,3 +1445,116 @@ def test_approve_with_cancellation( assert len(messages) > 0, "Should have persisted new messages" assert messages[0].message_type == "user_message", "First 
message should be a user message" assert "keep-alive" in messages[0].content, f"Expected keep-alive message, got '{messages[0].content}'" + + +def test_retry_with_summarization( + client: Letta, + agent: AgentState, +) -> None: + """ + Test that approval retry works correctly after summarization evicts messages from context. + + Scenario: + 1. Send message that triggers approval request + 2. Send approval response, but cancel during LLM processing + 3. Call summarize with mode='all' to evict all messages from context + 4. Verify only system and summary messages remain in context + 5. Retry the original approval response - should succeed via idempotency check + """ + import threading + import time + + # Step 1: Send message that triggers approval request + response = client.agents.messages.create( + agent_id=agent.id, + messages=USER_MESSAGE_TEST_APPROVAL, + ) + tool_call_id = response.messages[-1].tool_call.tool_call_id + + # Step 2: Start cancellation in background thread + def cancel_after_delay(): + time.sleep(0.3) # Wait for stream to start + client.agents.messages.cancel(agent_id=agent.id) + + cancel_thread = threading.Thread(target=cancel_after_delay, daemon=True) + cancel_thread.start() + + # Step 3: Start approval stream (will be cancelled during processing) + response = client.agents.messages.stream( + agent_id=agent.id, + messages=[ + { + "type": "approval", + "approvals": [ + { + "type": "tool", + "tool_call_id": tool_call_id, + "tool_return": SECRET_CODE, + "status": "success", + }, + ], + }, + ], + streaming=True, + stream_tokens=True, + ) + + # Step 4: Accumulate chunks (stream will be cancelled) + messages = accumulate_chunks(response) + + # Step 5: Verify we got cancelled + stop_reasons = [msg for msg in messages if hasattr(msg, "message_type") and msg.message_type == "stop_reason"] + assert len(stop_reasons) == 1, f"Expected exactly 1 stop_reason, got {len(stop_reasons)}" + assert stop_reasons[0].stop_reason == "cancelled", f"Expected stop_reason 
'cancelled', got '{stop_reasons[0].stop_reason}'" + + cancel_thread.join(timeout=1.0) + + # Step 6: Verify tool return message is persisted + all_messages = client.agents.messages.list(agent_id=agent.id, limit=100).items + tool_return_messages = [m for m in all_messages if m.message_type == "tool_return_message"] + assert len(tool_return_messages) > 0, "Tool return message should be persisted" + + # Step 7: Call compact with mode='all' to evict all messages from context + compaction_response = client.agents.messages.compact( + agent_id=agent.id, + compaction_settings={"mode": "all"}, + ) + + # Step 8: Verify only system and summary messages remain in context (should be 2) + assert compaction_response.num_messages_after == 2, ( + f"Expected 2 messages (system + summary) after compaction, but got {compaction_response.num_messages_after}" + ) + + logger.info(f"✅ After compaction: {compaction_response.num_messages_before} -> {compaction_response.num_messages_after} messages") + + # Step 9: Retry the original approval response - should succeed via idempotency check + response = client.agents.messages.stream( + agent_id=agent.id, + messages=[ + { + "type": "approval", + "approvals": [ + { + "type": "tool", + "tool_call_id": tool_call_id, + "tool_return": SECRET_CODE, + "status": "success", + }, + ], + }, + ], + streaming=True, + stream_tokens=True, + ) + + # Step 10: Accumulate chunks + messages = accumulate_chunks(response) + + # Step 11: Verify we got chunks AND an end_turn stop reason (not an error) + assert len(messages) > 1, "Should receive at least some chunks" + + stop_reasons = [msg for msg in messages if hasattr(msg, "message_type") and msg.message_type == "stop_reason"] + assert len(stop_reasons) == 1, f"Expected exactly 1 stop_reason, got {len(stop_reasons)}" + assert stop_reasons[0].stop_reason == "end_turn", f"Expected stop_reason 'end_turn', got '{stop_reasons[0].stop_reason}'" + + logger.info("✅ Test passed: approval retry after summarization handled 
correctly via idempotency check") diff --git a/tests/integration_test_mcp.py b/tests/integration_test_mcp.py index 7ac138d4..06531772 100644 --- a/tests/integration_test_mcp.py +++ b/tests/integration_test_mcp.py @@ -16,7 +16,6 @@ from letta_client.types.tool_return_message import ToolReturnMessage from letta.functions.mcp_client.types import StdioServerConfig from letta.schemas.agent import AgentState from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.letta_message_content import TextContent from letta.schemas.llm_config import LLMConfig # ------------------------------ diff --git a/tests/integration_test_modal.py b/tests/integration_test_modal.py index 8a2bb3cb..ae49b855 100644 --- a/tests/integration_test_modal.py +++ b/tests/integration_test_modal.py @@ -18,7 +18,6 @@ from letta.schemas.organization import Organization from letta.schemas.pip_requirement import PipRequirement from letta.schemas.sandbox_config import LocalSandboxConfig, ModalSandboxConfig, SandboxConfigCreate from letta.schemas.user import User -from letta.server.db import db_registry from letta.server.server import SyncServer from letta.services.organization_manager import OrganizationManager from letta.services.sandbox_config_manager import SandboxConfigManager diff --git a/tests/integration_test_multi_agent.py b/tests/integration_test_multi_agent.py index f4aec50f..37666239 100644 --- a/tests/integration_test_multi_agent.py +++ b/tests/integration_test_multi_agent.py @@ -86,7 +86,7 @@ def remove_stale_agents(client): @pytest.fixture(scope="function") def agent_obj(client: Letta) -> AgentState: """Create a test agent that we can call functions on""" - send_message_to_agent_tool = list(client.tools.list(name="send_message_to_agent_and_wait_for_reply"))[0] + send_message_to_agent_tool = next(iter(client.tools.list(name="send_message_to_agent_and_wait_for_reply"))) agent_state_instance = client.agents.create( agent_type="letta_v1_agent", include_base_tools=True, @@ 
-135,18 +135,50 @@ def roll_dice_tool(client: Letta): def test_send_message_to_agent(client: Letta, agent_obj: AgentState, other_agent_obj: AgentState): secret_word = "banana" - # Encourage the agent to send a message to the other agent_obj with the secret string + # Send a message to the agent asking it to use the tool response = client.agents.messages.create( agent_id=agent_obj.id, messages=[ MessageCreateParam( role="user", - content=f"Use your tool to send a message to another agent with id {other_agent_obj.id} to share the secret word: {secret_word}!", + content=f"IMPORTANT: You MUST use the send_message_to_agent_and_wait_for_reply tool RIGHT NOW to send a message to agent {other_agent_obj.id}. Include the exact secret word '{secret_word}' in your message. Call the tool immediately.", ) ], ) - # Get messages from the other agent + # FIRST: Verify the sender agent actually called the tool + # This catches LLM non-determinism early with a clear error message + found_tool_call = False + tool_return_message = None + target_snippet = f"'agent_id': '{other_agent_obj.id}', 'response': [" + + for m in response.messages: + if isinstance(m, ToolReturnMessage): + if target_snippet in m.tool_return: + found_tool_call = True + tool_return_message = m + break + + if not found_tool_call: + # Print debug info to help diagnose the issue + print("\n=== DEBUG: Sender agent did not call the tool ===") + print(f"Response messages from agent {agent_obj.id}:") + for i, m in enumerate(response.messages): + print(f"\nMessage {i} ({type(m).__name__}):") + if isinstance(m, ToolReturnMessage): + print(f" Tool return: {m.tool_return}") + elif hasattr(m, "content"): + print(f" Content: {m.content}") + else: + print(f" {m}") + raise AssertionError( + f"Sender agent {agent_obj.id} did not call send_message_to_agent_and_wait_for_reply tool. " + f"This is likely LLM non-determinism. Check debug output above for what the agent did instead." 
+ ) + + print(f"\n✓ Tool call verified: {tool_return_message.tool_return[:200]}...") + + # SECOND: Now verify the message arrived at the receiver messages_page = client.agents.messages.list(agent_id=other_agent_obj.id) messages = messages_page.items @@ -161,46 +193,6 @@ def test_send_message_to_agent(client: Letta, agent_obj: AgentState, other_agent assert found_secret, f"Secret word '{secret_word}' not found in system messages of agent {other_agent_obj.id}" - # Search the sender agent for the response from another agent - in_context_messages_page = client.agents.messages.list(agent_id=agent_obj.id) - in_context_messages = in_context_messages_page.items - found = False - target_snippet = f"'agent_id': '{other_agent_obj.id}', 'response': [" - - for m in in_context_messages: - # Check ToolReturnMessage for the response - if isinstance(m, ToolReturnMessage): - if target_snippet in m.tool_return: - found = True - break - # Handle different message content structures - elif hasattr(m, "content"): - if isinstance(m.content, list) and len(m.content) > 0: - content_text = m.content[0].text if hasattr(m.content[0], "text") else str(m.content[0]) - else: - content_text = str(m.content) - - if target_snippet in content_text: - found = True - break - - if not found: - # Print debug info - joined = "\n".join( - [ - str( - m.content[0].text - if hasattr(m, "content") and isinstance(m.content, list) and len(m.content) > 0 and hasattr(m.content[0], "text") - else m.content - if hasattr(m, "content") - else f"<{type(m).__name__}>" - ) - for m in in_context_messages[1:] - ] - ) - print(f"In context messages of the sender agent (without system):\n\n{joined}") - raise Exception(f"Was not able to find an instance of the target snippet: {target_snippet}") - # Test that the agent can still receive messages fine response = client.agents.messages.create( agent_id=agent_obj.id, @@ -226,7 +218,7 @@ def test_send_message_to_agents_with_tags_simple(client: Letta): secret_word = "banana" # 
Create "manager" agent - send_message_to_agents_matching_tags_tool_id = list(client.tools.list(name="send_message_to_agents_matching_tags"))[0].id + send_message_to_agents_matching_tags_tool_id = next(iter(client.tools.list(name="send_message_to_agents_matching_tags"))).id manager_agent_state = client.agents.create( agent_type="letta_v1_agent", name="manager_agent", @@ -337,7 +329,7 @@ def test_send_message_to_agents_with_tags_complex_tool_use(client: Letta, roll_d test_id = str(uuid.uuid4())[:8] # Create "manager" agent - send_message_to_agents_matching_tags_tool_id = list(client.tools.list(name="send_message_to_agents_matching_tags"))[0].id + send_message_to_agents_matching_tags_tool_id = next(iter(client.tools.list(name="send_message_to_agents_matching_tags"))).id manager_agent_state = client.agents.create( agent_type="letta_v1_agent", tool_ids=[send_message_to_agents_matching_tags_tool_id], diff --git a/tests/integration_test_multi_modal_tool_returns.py b/tests/integration_test_multi_modal_tool_returns.py index 831913e6..6304e539 100644 --- a/tests/integration_test_multi_modal_tool_returns.py +++ b/tests/integration_test_multi_modal_tool_returns.py @@ -15,7 +15,7 @@ import uuid import pytest from letta_client import Letta -from letta_client.types.agents import ApprovalRequestMessage, AssistantMessage, ToolCallMessage +from letta_client.types.agents import ApprovalRequestMessage, AssistantMessage # ------------------------------ # Constants diff --git a/tests/integration_test_override_model.py b/tests/integration_test_override_model.py index e5a484a0..4a897a57 100644 --- a/tests/integration_test_override_model.py +++ b/tests/integration_test_override_model.py @@ -13,7 +13,7 @@ import os import threading import time import uuid -from typing import Any, Generator, List +from typing import Generator, List import pytest import requests diff --git a/tests/integration_test_send_message.py b/tests/integration_test_send_message.py index 4c1d71b4..488a9d8c 100644 --- 
a/tests/integration_test_send_message.py +++ b/tests/integration_test_send_message.py @@ -29,7 +29,7 @@ from letta_client.types.agents.letta_streaming_response import LettaPing, LettaS from letta_client.types.agents.text_content_param import TextContentParam from letta.errors import LLMError -from letta.helpers.reasoning_helper import is_reasoning_completely_disabled +from letta.helpers.reasoning_helper import is_reasoning_completely_disabled # noqa: F401 from letta.llm_api.openai_client import is_openai_reasoning_model logger = logging.getLogger(__name__) @@ -370,7 +370,7 @@ def assert_greeting_with_assistant_message_response( assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix) index += 1 otid_suffix += 1 - except: + except Exception: # Reasoning is non-deterministic, so don't throw if missing pass @@ -508,7 +508,7 @@ def assert_greeting_without_assistant_message_response( assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix) index += 1 otid_suffix += 1 - except: + except Exception: # Reasoning is non-deterministic, so don't throw if missing pass @@ -664,7 +664,7 @@ def assert_tool_call_response( assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix) index += 1 otid_suffix += 1 - except: + except Exception: # Reasoning is non-deterministic, so don't throw if missing pass @@ -700,7 +700,7 @@ def assert_tool_call_response( assert isinstance(messages[index], (ReasoningMessage, HiddenReasoningMessage)) assert messages[index].otid and messages[index].otid[-1] == "0" index += 1 - except: + except Exception: # Reasoning is non-deterministic, so don't throw if missing pass @@ -856,7 +856,7 @@ def assert_image_input_response( assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix) index += 1 otid_suffix += 1 - except: + except Exception: # Reasoning is non-deterministic, so don't throw if missing pass @@ -1889,7 +1889,7 @@ def test_async_greeting_with_assistant_message( messages_page 
= client.runs.messages.list(run_id=run.id) messages = messages_page.items - usage = client.runs.usage.retrieve(run_id=run.id) + client.runs.usage.retrieve(run_id=run.id) # TODO: add results API test later assert_greeting_with_assistant_message_response(messages, model_handle, model_settings, from_db=True) # TODO: remove from_db=True later @@ -2267,7 +2267,7 @@ def test_job_creation_for_send_message( assert len(new_runs) == 1 for run in runs: - if run.id == list(new_runs)[0]: + if run.id == next(iter(new_runs)): assert run.status == "completed" @@ -2557,7 +2557,7 @@ def test_inner_thoughts_toggle_interleaved( # ) # Test our helper functions - assert is_reasoning_completely_disabled(adjusted_llm_config), "Reasoning should be completely disabled" + # assert is_reasoning_completely_disabled(adjusted_llm_config), "Reasoning should be completely disabled" # Verify that assistant messages with tool calls have been scrubbed of inner thoughts # Branch assertions based on model endpoint type diff --git a/tests/integration_test_send_message_v2.py b/tests/integration_test_send_message_v2.py index ce267e7d..fb19126d 100644 --- a/tests/integration_test_send_message_v2.py +++ b/tests/integration_test_send_message_v2.py @@ -25,6 +25,8 @@ from letta_client.types.agents.letta_streaming_response import LettaPing, LettaS logger = logging.getLogger(__name__) +_background_tasks: set[asyncio.Task] = set() + # ------------------------------ # Helper Functions and Constants @@ -37,7 +39,7 @@ all_configs = [ "openai-gpt-5.json", "claude-4-5-sonnet.json", "gemini-2.5-pro.json", - "zai-glm-4.6.json", + "zai-glm-5.json", ] @@ -132,7 +134,7 @@ def assert_greeting_response( assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix) index += 1 otid_suffix += 1 - except: + except Exception: # Reasoning is non-deterministic, so don't throw if missing pass @@ -203,16 +205,19 @@ def assert_tool_call_response( assert messages[index].otid and messages[index].otid[-1] == 
str(otid_suffix) index += 1 otid_suffix += 1 - except: + except Exception: # Reasoning is non-deterministic, so don't throw if missing pass - # Special case for claude-sonnet-4-5-20250929, opus-4.1, and zai which can generate an extra AssistantMessage before tool call - if ( - ("claude-sonnet-4-5-20250929" in model_handle or "claude-opus-4-1" in model_handle or model_settings.get("provider_type") == "zai") - and index < len(messages) - and isinstance(messages[index], AssistantMessage) - ): + # Special case for models that can generate an extra AssistantMessage before tool call + # (claude-sonnet-4-5, opus-4.1, zai, and self-hosted models like ollama/qwen3 with thinking) + is_extra_assistant_model = ( + "claude-sonnet-4-5-20250929" in model_handle + or "claude-opus-4-1" in model_handle + or model_settings.get("provider_type") == "zai" + or model_handle.startswith(("ollama/", "vllm/", "lmstudio_openai/")) + ) + if is_extra_assistant_model and index < len(messages) and isinstance(messages[index], AssistantMessage): # Skip the extra AssistantMessage and move to the next message index += 1 otid_suffix += 1 @@ -253,7 +258,7 @@ def assert_tool_call_response( assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix) index += 1 otid_suffix += 1 - except: + except Exception: # Reasoning is non-deterministic, so don't throw if missing pass @@ -441,6 +446,10 @@ def get_expected_message_count_range( if model_settings.get("provider_type") == "zai": expected_range += 1 + # Self-hosted models (ollama/vllm/lmstudio) may emit an extra AssistantMessage with thinking content + if model_handle.startswith(("ollama/", "vllm/", "lmstudio/", "lmstudio_openai/")): + expected_range += 1 + if tool_call: # tool call and tool return messages expected_message_count += 2 @@ -561,13 +570,16 @@ async def agent_state(client: AsyncLetta) -> AgentState: """ dice_tool = await client.tools.upsert_from_function(func=roll_dice) + initial_model = TESTED_MODEL_CONFIGS[0][0] if 
TESTED_MODEL_CONFIGS else "openai/gpt-4o"
+    initial_embedding = os.getenv("EMBEDDING_HANDLE", "openai/text-embedding-3-small")
+
     agent_state_instance = await client.agents.create(
         agent_type="letta_v1_agent",
         name="test_agent",
         include_base_tools=False,
         tool_ids=[dice_tool.id],
-        model="openai/gpt-4o",
-        embedding="openai/text-embedding-3-small",
+        model=initial_model,
+        embedding=initial_embedding,
         tags=["test"],
     )
     yield agent_state_instance
@@ -611,7 +623,7 @@ async def test_greeting(
         agent_id=agent_state.id,
         messages=USER_MESSAGE_FORCE_REPLY,
     )
-    run = await wait_for_run_completion(client, run.id, timeout=60.0)
+    run = await wait_for_run_completion(client, run.id, timeout=120.0)
     messages_page = await client.runs.messages.list(run_id=run.id)
     messages = [m for m in messages_page.items if m.message_type != "user_message"]
     run_id = run.id
@@ -677,6 +689,12 @@ async def test_parallel_tool_calls(
     if provider_type in ["google_ai", "google_vertex"]:
         pytest.skip("Gemini models are flaky for this test so we disable them for now")
 
+    if model_handle.startswith("lmstudio"):
+        pytest.skip("LMStudio runs on CPU and times out on parallel tool call tests")
+
+    if model_handle.startswith("vllm"):
+        pytest.skip("vLLM Qwen3 tool call parsers incompatible with streaming parallel tool calls")
+
     # Update model_settings to enable parallel tool calling
     modified_model_settings = model_settings.copy()
     modified_model_settings["parallel_tool_calls"] = True
@@ -697,7 +718,7 @@
         agent_id=agent_state.id,
         messages=USER_MESSAGE_PARALLEL_TOOL_CALL,
     )
-    await wait_for_run_completion(client, run.id, timeout=60.0)
+    await wait_for_run_completion(client, run.id, timeout=120.0)
     else:
         response = await client.agents.messages.stream(
             agent_id=agent_state.id,
@@ -871,8 +892,10 @@ async def test_tool_call(
     agent_state = await
client.agents.update(agent_id=agent_state.id, model=model_handle, model_settings=model_settings) if cancellation == "with_cancellation": - delay = 5 if "gpt-5" in model_handle else 0.5 # increase delay for responses api + delay = 5 if "gpt-5" in model_handle else 0.5 _cancellation_task = asyncio.create_task(cancel_run_after_delay(client, agent_state.id, delay=delay)) + _background_tasks.add(_cancellation_task) + _cancellation_task.add_done_callback(_background_tasks.discard) if send_type == "step": response = await client.agents.messages.create( @@ -886,7 +909,7 @@ async def test_tool_call( agent_id=agent_state.id, messages=USER_MESSAGE_ROLL_DICE, ) - run = await wait_for_run_completion(client, run.id, timeout=60.0) + run = await wait_for_run_completion(client, run.id, timeout=120.0) messages_page = await client.runs.messages.list(run_id=run.id) messages = [m for m in messages_page.items if m.message_type != "user_message"] run_id = run.id @@ -1076,6 +1099,10 @@ async def test_conversation_non_streaming_raw_http( assert "assistant_message" in message_types, f"Expected assistant_message in {message_types}" +@pytest.mark.skipif( + os.getenv("LLM_CONFIG_FILE", "").startswith(("ollama", "vllm", "lmstudio")), + reason="Structured output not supported on self-hosted providers in CI", +) @pytest.mark.parametrize( "model_handle,provider_type", [ @@ -1166,3 +1193,217 @@ async def test_json_schema_response_format( finally: # Cleanup await client.agents.delete(agent_state.id) + + +# Large memory block to exceed OpenAI's 1024 token caching threshold. +# This ensures the system prompt is large enough for OpenAI to cache it. +_LARGE_PERSONA_BLOCK = """ +You are an advanced AI assistant with extensive knowledge across multiple domains. 
+ +# Core Capabilities + +## Technical Knowledge +- Software Engineering: Expert in Python, JavaScript, TypeScript, Go, Rust, and many other languages +- System Design: Deep understanding of distributed systems, microservices, and cloud architecture +- DevOps: Proficient in Docker, Kubernetes, CI/CD pipelines, and infrastructure as code +- Databases: Experience with SQL (PostgreSQL, MySQL) and NoSQL (MongoDB, Redis, Cassandra) databases +- Machine Learning: Knowledge of neural networks, transformers, and modern ML frameworks + +## Problem Solving Approach +When tackling problems, you follow a structured methodology: +1. Understand the requirements thoroughly +2. Break down complex problems into manageable components +3. Consider multiple solution approaches +4. Evaluate trade-offs between different options +5. Implement solutions with clean, maintainable code +6. Test thoroughly and iterate based on feedback + +## Communication Style +- Clear and concise explanations +- Use examples and analogies when helpful +- Adapt technical depth to the audience +- Ask clarifying questions when requirements are ambiguous +- Provide context and rationale for recommendations + +# Domain Expertise + +## Web Development +You have deep knowledge of: +- Frontend: React, Vue, Angular, Next.js, modern CSS frameworks +- Backend: Node.js, Express, FastAPI, Django, Flask +- API Design: REST, GraphQL, gRPC +- Authentication: OAuth, JWT, session management +- Performance: Caching strategies, CDNs, lazy loading + +## Data Engineering +You understand: +- ETL pipelines and data transformation +- Data warehousing concepts (Snowflake, BigQuery, Redshift) +- Stream processing (Kafka, Kinesis) +- Data modeling and schema design +- Data quality and validation + +## Cloud Platforms +You're familiar with: +- AWS: EC2, S3, Lambda, RDS, DynamoDB, CloudFormation +- GCP: Compute Engine, Cloud Storage, Cloud Functions, BigQuery +- Azure: Virtual Machines, Blob Storage, Azure Functions +- Serverless 
architectures and best practices +- Cost optimization strategies + +## Security +You consider: +- Common vulnerabilities (OWASP Top 10) +- Secure coding practices +- Encryption and key management +- Access control and authorization patterns +- Security audit and compliance requirements + +# Interaction Principles + +## Helpfulness +- Provide actionable guidance +- Share relevant resources and documentation +- Offer multiple approaches when appropriate +- Point out potential pitfalls and edge cases + +## Accuracy +- Verify information before sharing +- Acknowledge uncertainty when appropriate +- Correct mistakes promptly +- Stay up-to-date with best practices + +## Efficiency +- Get to the point quickly +- Avoid unnecessary verbosity +- Focus on what's most relevant +- Provide code examples when they clarify concepts +""" + "\n\n".join( + [ + f"Section {i + 1}: " + + """ +You have deep expertise in software development, including but not limited to: +- Programming languages: Python, JavaScript, TypeScript, Java, C++, Rust, Go, Swift, Kotlin, Ruby, PHP, Scala +- Web frameworks: React, Vue, Angular, Django, Flask, FastAPI, Express, Next.js, Nuxt, SvelteKit, Remix, Astro +- Databases: PostgreSQL, MySQL, MongoDB, Redis, Cassandra, DynamoDB, ElasticSearch, Neo4j, InfluxDB, TimescaleDB +- Cloud platforms: AWS (EC2, S3, Lambda, ECS, EKS, RDS), GCP (Compute Engine, Cloud Run, GKE), Azure (VMs, Functions, AKS) +- DevOps tools: Docker, Kubernetes, Terraform, Ansible, Jenkins, GitHub Actions, GitLab CI, CircleCI, ArgoCD +- Testing frameworks: pytest, Jest, Mocha, JUnit, unittest, Cypress, Playwright, Selenium, TestNG, RSpec +- Architecture patterns: Microservices, Event-driven, Serverless, Monolithic, CQRS, Event Sourcing, Hexagonal +- API design: REST, GraphQL, gRPC, WebSockets, Server-Sent Events, tRPC, JSON-RPC +""" + for i in range(4) + ] +) + +# Models that support prompt_cache_retention="24h": +# gpt-4.1, gpt-5 family (but not gpt-5-mini). 
+_PROMPT_CACHE_RETENTION_PREFIXES = ("gpt-4.1", "gpt-5") + +PROMPT_CACHE_MODEL_CONFIGS: List[Tuple[str, dict]] = [ + (handle, settings) + for handle, settings in TESTED_MODEL_CONFIGS + if settings.get("provider_type") == "openai" and any(handle.split("/")[-1].startswith(p) for p in _PROMPT_CACHE_RETENTION_PREFIXES) +] + + +@pytest.mark.skip(reason="the prompt caching is flaky") +@pytest.mark.parametrize( + "model_config", + PROMPT_CACHE_MODEL_CONFIGS, + ids=[handle for handle, _ in PROMPT_CACHE_MODEL_CONFIGS], +) +@pytest.mark.asyncio(loop_scope="function") +async def test_openai_prompt_cache_integration( + disable_e2b_api_key: Any, + client: AsyncLetta, + model_config: Tuple[str, dict], +) -> None: + """ + Integration test verifying OpenAI prompt caching works end-to-end. + + Tests models that support prompt_cache_retention="24h". + Validates that this field is accepted by OpenAI's API and produces cache hits. + + Strategy: + 1. Create an agent with a large persona block (>1024 tokens, OpenAI's caching threshold) + 2. Send message 1 -> primes the cache (cached_input_tokens should be 0 or small) + 3. Send message 2 -> should hit the cache (cached_input_tokens > 0) + + We rely on OpenAI's default prefix-hash routing (no prompt_cache_key) since each + agent has a unique system prompt, providing natural cache affinity. + """ + from letta_client.types import CreateBlockParam + + model_handle, model_settings = model_config + + agent = await client.agents.create( + name=f"prompt-cache-test-{uuid.uuid4().hex[:8]}", + agent_type="letta_v1_agent", + model=model_handle, + model_settings=model_settings, + embedding="openai/text-embedding-3-small", + include_base_tools=False, + memory_blocks=[ + CreateBlockParam( + label="persona", + value=_LARGE_PERSONA_BLOCK, + ) + ], + ) + + try: + # Message 1: Prime the cache. First request typically has cached_input_tokens=0. 
+ response1 = await client.agents.messages.create( + agent_id=agent.id, + messages=[MessageCreateParam(role="user", content="Hello! Please introduce yourself briefly.")], + ) + assert response1.usage is not None, "First message should return usage data" + assert response1.usage.prompt_tokens > 0, "First message should have prompt_tokens > 0" + + logger.info( + f"[{model_handle}] Message 1 usage: " + f"prompt={response1.usage.prompt_tokens}, " + f"completion={response1.usage.completion_tokens}, " + f"cached_input={response1.usage.cached_input_tokens}" + ) + + # Verify we exceeded the 1024 token threshold for OpenAI caching + total_input_tokens = response1.usage.prompt_tokens + (response1.usage.cached_input_tokens or 0) + assert total_input_tokens >= 1024, f"Total input tokens ({total_input_tokens}) must be >= 1024 for OpenAI caching to activate" + + # Message 2: Should hit the cache thanks to prefix-hash routing. + response2 = await client.agents.messages.create( + agent_id=agent.id, + messages=[MessageCreateParam(role="user", content="What are your main areas of expertise?")], + ) + assert response2.usage is not None, "Second message should return usage data" + assert response2.usage.prompt_tokens > 0, "Second message should have prompt_tokens > 0" + + logger.info( + f"[{model_handle}] Message 2 usage: " + f"prompt={response2.usage.prompt_tokens}, " + f"completion={response2.usage.completion_tokens}, " + f"cached_input={response2.usage.cached_input_tokens}" + ) + + # CRITICAL: The second message should show cached_input_tokens > 0. + # This proves that prompt_cache_retention is being sent correctly + # and OpenAI is caching the prompt prefix. + cached_tokens = response2.usage.cached_input_tokens + assert cached_tokens is not None and cached_tokens > 0, ( + f"[{model_handle}] Expected cached_input_tokens > 0 on second message, got {cached_tokens}. " + "This means prompt caching is not working (cache miss occurred)." 
+ ) + + # Cache hit ratio should be significant (most of the system prompt should be cached) + total_input_msg2 = response2.usage.prompt_tokens + (response2.usage.cached_input_tokens or 0) + cache_hit_ratio = cached_tokens / total_input_msg2 if total_input_msg2 > 0 else 0 + logger.info(f"[{model_handle}] Cache hit ratio: {cache_hit_ratio:.2%}") + + assert cache_hit_ratio >= 0.20, ( + f"[{model_handle}] Expected cache hit ratio >= 20%, got {cache_hit_ratio:.2%}. The large persona block should be mostly cached." + ) + + finally: + await client.agents.delete(agent.id) diff --git a/tests/integration_test_sleeptime_agent.py b/tests/integration_test_sleeptime_agent.py index dce649f7..1f26550c 100644 --- a/tests/integration_test_sleeptime_agent.py +++ b/tests/integration_test_sleeptime_agent.py @@ -325,7 +325,6 @@ async def test_sleeptime_agent_new_block_attachment(client): assert main_agent.id in [agent.id for agent in agents] # 4. Create a new block after agent creation - from letta.schemas.block import Block as PydanticBlock new_block = client.blocks.create( label="preferences", diff --git a/tests/integration_test_summarizer.py b/tests/integration_test_summarizer.py index fec63a4d..b339c4d6 100644 --- a/tests/integration_test_summarizer.py +++ b/tests/integration_test_summarizer.py @@ -13,20 +13,16 @@ from typing import List, Literal import pytest -from letta.agents.letta_agent_v2 import LettaAgentV2 from letta.agents.letta_agent_v3 import LettaAgentV3 from letta.config import LettaConfig -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import BlockUpdate, CreateBlock +from letta.schemas.agent import CreateAgent from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import MessageRole -from letta.schemas.letta_message import LettaMessage +from letta.schemas.letta_message import EventMessage, SummaryMessage from letta.schemas.letta_message_content import TextContent, ToolCallContent, ToolReturnContent from 
letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate -from letta.schemas.run import Run as PydanticRun +from letta.schemas.message import Message as PydanticMessage from letta.server.server import SyncServer -from letta.services.run_manager import RunManager from letta.services.summarizer.summarizer import simple_summary from letta.settings import model_settings @@ -219,7 +215,7 @@ async def test_summarize_empty_message_buffer(server: SyncServer, actor, llm_con # Run summarization - this may fail with empty buffer, which is acceptable behavior try: - summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor) + _summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor) # If it succeeds, verify result assert isinstance(result, list) @@ -312,7 +308,7 @@ async def test_summarize_initialization_messages_only(server: SyncServer, actor, # Run summarization - force=True with system messages only may fail try: - summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor, force=True) + _summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor, force=True) # Verify result assert isinstance(result, list) @@ -368,7 +364,7 @@ async def test_summarize_small_conversation(server: SyncServer, actor, llm_confi # Run summarization with force=True # Note: force=True with clear=True can be very aggressive and may fail on small message sets try: - summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor, force=True) + _summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor, force=True) # Verify result assert isinstance(result, list) @@ -461,7 +457,7 @@ async def test_summarize_large_tool_calls(server: SyncServer, actor, llm_config: assert total_content_size > 40000, f"Expected large messages, got 
{total_content_size} chars" # Run summarization - summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor) + _summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor) # Verify result assert isinstance(result, list) @@ -565,7 +561,7 @@ async def test_summarize_multiple_large_tool_calls(server: SyncServer, actor, ll assert total_content_size > 40000, f"Expected large messages, got {total_content_size} chars" # Run summarization - summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor) + _summary, result, _ = await run_summarization(server, agent_state, in_context_messages, actor) # Verify result assert isinstance(result, list) @@ -670,14 +666,24 @@ from unittest.mock import patch from letta.services.summarizer.summarizer_config import CompactionSettings -# Test both summarizer modes: "all" summarizes entire history, "sliding_window" keeps recent messages -SUMMARIZER_CONFIG_MODES: list[Literal["all", "sliding_window"]] = ["all", "sliding_window"] +# Test all summarizer modes: "all" summarizes entire history, "sliding_window" keeps recent messages +SUMMARIZER_CONFIG_MODES: list[Literal["all", "sliding_window", "self_compact_all", "self_compact_sliding_window"]] = [ + "all", + "sliding_window", + "self_compact_all", + "self_compact_sliding_window", +] @pytest.mark.asyncio @pytest.mark.parametrize("mode", SUMMARIZER_CONFIG_MODES, ids=SUMMARIZER_CONFIG_MODES) @pytest.mark.parametrize("llm_config", TESTED_LLM_CONFIGS, ids=[c.model for c in TESTED_LLM_CONFIGS]) -async def test_summarize_with_mode(server: SyncServer, actor, llm_config: LLMConfig, mode: Literal["all", "sliding_window"]): +async def test_summarize_with_mode( + server: SyncServer, + actor, + llm_config: LLMConfig, + mode: Literal["all", "sliding_window", "self_compact_all", "self_compact_sliding_window"], +): """ Test summarization with different CompactionSettings modes using LettaAgentV3. 
@@ -725,7 +731,7 @@ async def test_summarize_with_mode(server: SyncServer, actor, llm_config: LLMCon agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) - summary, result, _ = await agent_loop.compact(messages=in_context_messages) + _summary, result, summary_text = await agent_loop.compact(messages=in_context_messages) assert isinstance(result, list) @@ -734,31 +740,180 @@ async def test_summarize_with_mode(server: SyncServer, actor, llm_config: LLMCon assert hasattr(msg, "role") assert hasattr(msg, "content") + # Verify the summary text (third return value) is a non-empty string. + # This is used by the agent loop to construct a SummaryMessage for clients. + assert isinstance(summary_text, str), f"Expected summary_text to be a string, got {type(summary_text)}" + assert len(summary_text) > 0, "Expected non-empty summary text" + print() print(f"RESULTS {mode} ======") for msg in result: print(f"MSG: {msg}") + print(f"SUMMARY TEXT: {summary_text[:200]}...") print() - if mode == "all": - # For "all" mode, V3 keeps: + if mode == "all" or mode == "self_compact_all": + # For "all" or "self" mode, V3 keeps: # 1. System prompt # 2. A single user summary message (system_alert JSON) # and no remaining historical messages. - assert len(result) == 2, f"Expected 2 messages for 'all' mode (system + summary), got {len(result)}" + assert len(result) == 2, f"Expected 2 messages for {mode} mode (system + summary), got {len(result)}" assert result[0].role == MessageRole.system assert result[1].role == MessageRole.user else: - # For "sliding_window" mode, result should include: + # For "sliding_window" or "self_compact_sliding_window" mode, result should include: # 1. System prompt # 2. User summary message # 3+. Recent user/assistant messages inside the window. 
- assert len(result) > 2, f"Expected >2 messages for 'sliding_window' mode, got {len(result)}" + assert len(result) > 2, f"Expected >2 messages for {mode} mode, got {len(result)}" assert result[0].role == MessageRole.system assert result[1].role == MessageRole.user +@pytest.mark.asyncio +@pytest.mark.parametrize( + "llm_config", + TESTED_LLM_CONFIGS, + ids=[c.model for c in TESTED_LLM_CONFIGS], +) +async def test_compact_returns_valid_summary_message_and_event_message(server: SyncServer, actor, llm_config: LLMConfig): + """ + Test that compact() return values can be used to construct valid SummaryMessage and EventMessage objects. + + This validates the contract that _step() relies on: compact() returns + (summary_message_obj, compacted_messages, summary_text) where summary_text + is used to build a SummaryMessage and the metadata is used for an EventMessage. + """ + import uuid + + from letta.helpers.datetime_helpers import get_utc_time + + # Create a conversation with enough messages to summarize + messages = [ + PydanticMessage( + role=MessageRole.system, + content=[TextContent(type="text", text="You are a helpful assistant.")], + ) + ] + for i in range(10): + messages.append( + PydanticMessage( + role=MessageRole.user, + content=[TextContent(type="text", text=f"User message {i}: Test message {i}.")], + ) + ) + messages.append( + PydanticMessage( + role=MessageRole.assistant, + content=[TextContent(type="text", text=f"Assistant response {i}: Acknowledged message {i}.")], + ) + ) + + agent_state, in_context_messages = await create_agent_with_messages(server, actor, llm_config, messages) + + handle = llm_config.handle or f"{llm_config.model_endpoint_type}/{llm_config.model}" + agent_state.compaction_settings = CompactionSettings(model=handle, mode="all") + + agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) + + summary_message_obj, _compacted_messages, summary_text = await agent_loop.compact(messages=in_context_messages) + + # Verify we can construct 
a valid SummaryMessage from compact() return values + summary_msg = SummaryMessage( + id=summary_message_obj.id, + date=summary_message_obj.created_at, + summary=summary_text, + otid=PydanticMessage.generate_otid_from_id(summary_message_obj.id, 0), + step_id=None, + run_id=None, + ) + assert summary_msg.message_type == "summary_message" + assert isinstance(summary_msg.summary, str) + assert len(summary_msg.summary) > 0 + assert summary_msg.id == summary_message_obj.id + + # Verify we can construct a valid EventMessage for compaction + event_msg = EventMessage( + id=str(uuid.uuid4()), + date=get_utc_time(), + event_type="compaction", + event_data={ + "trigger": "post_step_context_check", + "context_token_estimate": 1000, + "context_window": agent_state.llm_config.context_window, + }, + run_id=None, + step_id=None, + ) + assert event_msg.message_type == "event_message" + assert event_msg.event_type == "compaction" + assert "trigger" in event_msg.event_data + assert "context_window" in event_msg.event_data + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "llm_config", + TESTED_LLM_CONFIGS, + ids=[c.model for c in TESTED_LLM_CONFIGS], +) +async def test_compact_with_use_summary_role_creates_summary_message_role(server: SyncServer, actor, llm_config: LLMConfig): + """ + Test that compact() with use_summary_role=True creates a message with role=MessageRole.summary. + + This validates that manual compaction endpoints (which pass use_summary_role=True) + will store summary messages with the dedicated 'summary' role instead of the legacy 'user' role. 
+ """ + # Create a conversation with enough messages to summarize + messages = [ + PydanticMessage( + role=MessageRole.system, + content=[TextContent(type="text", text="You are a helpful assistant.")], + ) + ] + for i in range(10): + messages.append( + PydanticMessage( + role=MessageRole.user, + content=[TextContent(type="text", text=f"User message {i}: Test message {i}.")], + ) + ) + messages.append( + PydanticMessage( + role=MessageRole.assistant, + content=[TextContent(type="text", text=f"Assistant response {i}: Acknowledged message {i}.")], + ) + ) + + agent_state, in_context_messages = await create_agent_with_messages(server, actor, llm_config, messages) + + handle = llm_config.handle or f"{llm_config.model_endpoint_type}/{llm_config.model}" + agent_state.compaction_settings = CompactionSettings(model=handle, mode="all") + + agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) + + # Call compact with use_summary_role=True (as the REST endpoints now do) + summary_message_obj, compacted_messages, summary_text = await agent_loop.compact( + messages=in_context_messages, + use_summary_role=True, + ) + + # Verify the summary message has role=summary (not user) + assert summary_message_obj.role == MessageRole.summary, ( + f"Expected summary message to have role=summary when use_summary_role=True, got {summary_message_obj.role}" + ) + + # Verify the compacted messages list structure + assert len(compacted_messages) == 2, f"Expected 2 messages (system + summary), got {len(compacted_messages)}" + assert compacted_messages[0].role == MessageRole.system + assert compacted_messages[1].role == MessageRole.summary + + # Verify summary text is non-empty + assert isinstance(summary_text, str) + assert len(summary_text) > 0 + + @pytest.mark.asyncio async def test_v3_compact_uses_compaction_settings_model_and_model_settings(server: SyncServer, actor): """Integration test: LettaAgentV3.compact uses the LLMConfig implied by CompactionSettings. 
@@ -823,7 +978,7 @@ async def test_v3_compact_uses_compaction_settings_model_and_model_settings(serv # Patch simple_summary so we don't hit the real LLM and can inspect llm_config with patch.object(summarizer_all, "simple_summary", new=fake_simple_summary): agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) - summary_msg, compacted, _ = await agent_loop.compact(messages=in_context_messages) + summary_msg, _compacted, _ = await agent_loop.compact(messages=in_context_messages) assert summary_msg is not None assert "value" in captured_llm_config @@ -897,10 +1052,10 @@ async def test_v3_summarize_hard_eviction_when_still_over_threshold( # summarize_conversation_history to run and then hit the branch where the # *post*-summarization token count is still above the proactive # summarization threshold. We simulate that by patching the - # letta_agent_v3-level count_tokens helper to report an extremely large + # count_tokens_with_tools helper to report an extremely large # token count for the first call (post-summary) and a small count for the # second call (after hard eviction). - with patch("letta.agents.letta_agent_v3.count_tokens") as mock_count_tokens: + with patch("letta.services.summarizer.compact.count_tokens_with_tools") as mock_count_tokens: # First call: pretend the summarized context is still huge relative to # this model's context window so that we always trigger the # hard-eviction path. 
Second call: minimal context (system only) is @@ -911,7 +1066,7 @@ async def test_v3_summarize_hard_eviction_when_still_over_threshold( caplog.set_level("ERROR") - summary, result, _ = await agent_loop.compact( + _summary, result, summary_text = await agent_loop.compact( messages=in_context_messages, trigger_threshold=context_limit, ) @@ -932,6 +1087,10 @@ async def test_v3_summarize_hard_eviction_when_still_over_threshold( assert result[0].role == MessageRole.system assert result[1].role == MessageRole.user + # Verify the summary text is returned (used to construct SummaryMessage in the agent loop) + assert isinstance(summary_text, str), f"Expected summary_text to be a string, got {type(summary_text)}" + assert len(summary_text) > 0, "Expected non-empty summary text after hard eviction" + # ====================================================================================================================== # Sliding Window Summarizer Unit Tests @@ -1004,6 +1163,7 @@ async def test_sliding_window_cutoff_index_does_not_exceed_message_count(server: summary, remaining_messages = await summarize_via_sliding_window( actor=actor, llm_config=llm_config, + agent_llm_config=llm_config, # case where agent and summarizer have same config summarizer_config=summarizer_config, in_context_messages=messages, ) @@ -1042,97 +1202,206 @@ async def test_sliding_window_cutoff_index_does_not_exceed_message_count(server: TESTED_LLM_CONFIGS, ids=[c.model for c in TESTED_LLM_CONFIGS], ) -async def test_large_system_prompt_summarization(server: SyncServer, actor, llm_config: LLMConfig): +async def test_self_sliding_window_cutoff_index_does_not_exceed_message_count(server: SyncServer, actor, llm_config: LLMConfig): """ - Test edge case of large system prompt / memory blocks. + Test that the sliding window summarizer correctly calculates cutoff indices. 
- This test verifies that summarization handles the case where the system prompt - and memory blocks are very large, potentially consuming most of the context window. - The summarizer should gracefully handle this scenario without errors. + This test verifies the fix for a bug where the cutoff percentage was treated as + a whole number (10) instead of a decimal (0.10), causing: + message_cutoff_index = round(10 * 65) = 650 + when there were only 65 messages, resulting in an empty range loop and the error: + "No assistant message found from indices 650 to 65" + + The fix changed: + - max(..., 10) -> max(..., 0.10) + - += 10 -> += 0.10 + - >= 100 -> >= 1.0 + + This test uses the real token counter (via create_token_counter) to verify + the sliding window logic works with actual token counting. """ + from letta.llm_api.llm_client import LLMClient + from letta.schemas.agent import AgentType + from letta.services.summarizer.self_summarizer import self_summarize_sliding_window + from letta.services.summarizer.summarizer_config import CompactionSettings + from letta.services.telemetry_manager import TelemetryManager - # Override context window to be small so we trigger summarization - llm_config.context_window = 10000 + # Create a real summarizer config using the default factory + # Override sliding_window_percentage to 0.3 for this test + handle = llm_config.handle or f"{llm_config.model_endpoint_type}/{llm_config.model}" + summarizer_config = CompactionSettings(model=handle) + summarizer_config.sliding_window_percentage = 0.3 - # Create agent with large system prompt and memory blocks - agent_name = f"test_agent_large_system_prompt_{llm_config.model}".replace(".", "_").replace("/", "_") - agent_create = CreateAgent( - name=agent_name, - llm_config=llm_config, - embedding_config=DEFAULT_EMBEDDING_CONFIG, - system="SYSTEM PROMPT " * 10000, # Large system prompt - memory_blocks=[ - CreateBlock( - label="human", - limit=200000, - value="NAME " * 10000, # Large memory block 
+ # Create 65 messages (similar to the failing case in the bug report) + # Pattern: system + alternating user/assistant messages + messages = [ + PydanticMessage( + role=MessageRole.system, + content=[TextContent(type="text", text="You are a helpful assistant.")], + ) + ] + + # Add 64 more messages (32 user-assistant pairs) + for i in range(32): + messages.append( + PydanticMessage( + role=MessageRole.user, + content=[TextContent(type="text", text=f"User message {i}")], + ) + ) + messages.append( + PydanticMessage( + role=MessageRole.assistant, + content=[TextContent(type="text", text=f"Assistant response {i}")], ) - ], - ) - agent_state = await server.agent_manager.create_agent_async(agent_create, actor=actor) - - # Create a run for the agent using RunManager - run = PydanticRun(agent_id=agent_state.id) - run = await RunManager().create_run(pydantic_run=run, actor=actor) - - # Create the agent loop using LettaAgentV3 - agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) - - # message the agent - input_message = MessageCreate(role=MessageRole.user, content="Hello") - - # Call step on the agent - may trigger summarization due to large context - from letta.errors import SystemPromptTokenExceededError - - with pytest.raises(SystemPromptTokenExceededError): - response = await agent_loop.step( - input_messages=[input_message], - run_id=run.id, - max_steps=3, ) - # Repair the agent by shortening the memory blocks and system prompt - # Update system prompt to a shorter version - short_system_prompt = "You are a helpful assistant." - await server.agent_manager.update_agent_async( - agent_id=agent_state.id, - agent_update=UpdateAgent(system=short_system_prompt), - actor=actor, - ) + assert len(messages) == 65, f"Expected 65 messages, got {len(messages)}" - # Update memory block to a shorter version - short_memory_value = "The user's name is Alice." 
- await server.agent_manager.modify_block_by_label_async( - agent_id=agent_state.id, - block_label="human", - block_update=BlockUpdate(value=short_memory_value), - actor=actor, - ) + # This should NOT raise "No assistant message found from indices 650 to 65" + # With the fix, message_count_cutoff_percent starts at max(0.7, 0.10) = 0.7 + # So message_cutoff_index = round(0.7 * 65) = 46, which is valid + try: + summary, remaining_messages = await self_summarize_sliding_window( + actor=actor, + agent_id="agent-test-self-sliding-window", + agent_llm_config=llm_config, + telemetry_manager=TelemetryManager(), + llm_client=LLMClient.create(llm_config), + agent_type=AgentType.letta_v1_agent, + messages=messages, + compaction_settings=summarizer_config, + timezone="UTC", + ) - # Reload agent state after repairs - agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=agent_state.id, actor=actor) - print("REPAIRED AGENT STATE ======") - print(agent_state.system) - print(agent_state.blocks) + # Verify the summary was generated (actual LLM response) + assert summary is not None + assert len(summary) > 0 - # Create a new run for the repaired agent - run = PydanticRun(agent_id=agent_state.id) - run = await RunManager().create_run(pydantic_run=run, actor=actor) + # Verify remaining messages is a valid subset + assert len(remaining_messages) < len(messages) + assert len(remaining_messages) > 0 - # Create a new agent loop with the repaired agent state - agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) + print(f"Successfully summarized {len(messages)} messages to {len(remaining_messages)} remaining") + print(f"Summary: {summary[:200]}..." 
if len(summary) > 200 else f"Summary: {summary}") + print(f"Using {llm_config.model_endpoint_type} token counter for model {llm_config.model}") - # Now the agent should be able to respond without context window errors - response = await agent_loop.step( - input_messages=[input_message], - run_id=run.id, - max_steps=3, - ) + except ValueError as e: + if "No assistant message found from indices" in str(e): + # Extract the indices from the error message + import re - # Verify we got a valid response after repair - assert response is not None - assert response.messages is not None - print(f"Agent successfully responded after repair with {len(response.messages)} messages") + match = re.search(r"from indices (\d+) to (\d+)", str(e)) + if match: + start_idx, end_idx = int(match.group(1)), int(match.group(2)) + pytest.fail( + f"Bug detected: cutoff index ({start_idx}) exceeds message count ({end_idx}). " + f"This indicates the percentage calculation bug where 10 was used instead of 0.10. " + f"Error: {e}" + ) + raise + + +### NOTE: removing edge case test where sys prompt is huge for now +### because we no longer refresh the system prompt before compaction +### in order to leverage caching (for self compaction) +# @pytest.mark.asyncio +# @pytest.mark.parametrize( +# "llm_config", +# TESTED_LLM_CONFIGS, +# ids=[c.model for c in TESTED_LLM_CONFIGS], +# ) +# async def test_large_system_prompt_summarization(server: SyncServer, actor, llm_config: LLMConfig): +# """ +# Test edge case of large system prompt / memory blocks. + +# This test verifies that summarization handles the case where the system prompt +# and memory blocks are very large, potentially consuming most of the context window. +# The summarizer should gracefully handle this scenario without errors. 
+# """ + +# # Override context window to be small so we trigger summarization +# llm_config.context_window = 10000 + +# # Create agent with large system prompt and memory blocks +# agent_name = f"test_agent_large_system_prompt_{llm_config.model}".replace(".", "_").replace("/", "_") +# agent_create = CreateAgent( +# name=agent_name, +# llm_config=llm_config, +# embedding_config=DEFAULT_EMBEDDING_CONFIG, +# system="SYSTEM PROMPT " * 10000, # Large system prompt +# memory_blocks=[ +# CreateBlock( +# label="human", +# limit=200000, +# value="NAME " * 10000, # Large memory block +# ) +# ], +# ) +# agent_state = await server.agent_manager.create_agent_async(agent_create, actor=actor) + +# # Create a run for the agent using RunManager +# run = PydanticRun(agent_id=agent_state.id) +# run = await RunManager().create_run(pydantic_run=run, actor=actor) + +# # Create the agent loop using LettaAgentV3 +# agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) + +# # message the agent +# input_message = MessageCreate(role=MessageRole.user, content="Hello") + +# # Call step on the agent - may trigger summarization due to large context +# from letta.errors import SystemPromptTokenExceededError + +# with pytest.raises(SystemPromptTokenExceededError): +# response = await agent_loop.step( +# input_messages=[input_message], +# run_id=run.id, +# max_steps=3, +# ) + +# # Repair the agent by shortening the memory blocks and system prompt +# # Update system prompt to a shorter version +# short_system_prompt = "You are a helpful assistant." +# await server.agent_manager.update_agent_async( +# agent_id=agent_state.id, +# agent_update=UpdateAgent(system=short_system_prompt), +# actor=actor, +# ) + +# # Update memory block to a shorter version +# short_memory_value = "The user's name is Alice." 
+# await server.agent_manager.modify_block_by_label_async( +# agent_id=agent_state.id, +# block_label="human", +# block_update=BlockUpdate(value=short_memory_value), +# actor=actor, +# ) + +# # Reload agent state after repairs +# agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=agent_state.id, actor=actor) +# print("REPAIRED AGENT STATE ======") +# print(agent_state.system) +# print(agent_state.blocks) + +# # Create a new run for the repaired agent +# run = PydanticRun(agent_id=agent_state.id) +# run = await RunManager().create_run(pydantic_run=run, actor=actor) + +# # Create a new agent loop with the repaired agent state +# agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) + +# # Now the agent should be able to respond without context window errors +# response = await agent_loop.step( +# input_messages=[input_message], +# run_id=run.id, +# max_steps=3, +# ) + +# # Verify we got a valid response after repair +# assert response is not None +# assert response.messages is not None +# print(f"Agent successfully responded after repair with {len(response.messages)} messages") # @pytest.mark.asyncio @@ -1563,3 +1832,453 @@ async def test_summarize_all(server: SyncServer, actor, llm_config: LLMConfig): print(f"Successfully summarized {len(messages)} messages using 'all' mode") print(f"Summary: {summary[:200]}..." if len(summary) > 200 else f"Summary: {summary}") print(f"Using {llm_config.model_endpoint_type} for model {llm_config.model}") + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "llm_config", + TESTED_LLM_CONFIGS, + ids=[c.model for c in TESTED_LLM_CONFIGS], +) +async def test_summarize_self(server: SyncServer, actor, llm_config: LLMConfig): + """ + Test the summarize_all function with real LLM calls. + + This test verifies that the 'all' summarization mode works correctly, + summarizing the entire conversation into a single summary string. 
+ """ + from letta.llm_api.llm_client import LLMClient + from letta.schemas.agent import AgentType + from letta.services.summarizer.self_summarizer import self_summarize_all + from letta.services.summarizer.summarizer_config import CompactionSettings + from letta.services.telemetry_manager import TelemetryManager + + # Create a summarizer config with "self" mode + handle = llm_config.handle or f"{llm_config.model_endpoint_type}/{llm_config.model}" + summarizer_config = CompactionSettings(model=handle) + summarizer_config.mode = "self" + + # Create test messages - a simple conversation + messages = [ + PydanticMessage( + role=MessageRole.system, + content=[TextContent(type="text", text="You are a helpful assistant.")], + ) + ] + + # Add 10 user-assistant pairs + for i in range(10): + messages.append( + PydanticMessage( + role=MessageRole.user, + content=[TextContent(type="text", text=f"User message {i}: What is {i} + {i}?")], + ) + ) + messages.append( + PydanticMessage( + role=MessageRole.assistant, + content=[TextContent(type="text", text=f"Assistant response {i}: {i} + {i} = {i * 2}.")], + ) + ) + + assert len(messages) == 21, f"Expected 21 messages, got {len(messages)}" + + # Call summarize_all with real LLM + summary, new_in_context_messages = await self_summarize_all( + actor=actor, + agent_id="agent-test-self-sliding-window", + agent_llm_config=llm_config, + telemetry_manager=TelemetryManager(), + llm_client=LLMClient.create(llm_config), + agent_type=AgentType.letta_v1_agent, + messages=messages, + compaction_settings=summarizer_config, + timezone="UTC", + ) + + # Verify the summary was generated + assert len(new_in_context_messages) == 1 + assert summary is not None + assert len(summary) > 0 + assert len(summary) <= 5000 # length should be less than 500 words, give some buffer in test + + print(f"Successfully summarized {len(messages)} messages using 'self' mode") + print(f"Summary: {summary[:200]}..." 
if len(summary) > 200 else f"Summary: {summary}") + print(f"Using {llm_config.model_endpoint_type} for model {llm_config.model}") + + +@pytest.mark.asyncio +@pytest.mark.parametrize("llm_config", TESTED_LLM_CONFIGS, ids=[c.model for c in TESTED_LLM_CONFIGS]) +async def test_self_mode_fallback(server: SyncServer, actor, llm_config: LLMConfig): + """If self summarize fails, it should have proper fallback.""" + from unittest.mock import AsyncMock, patch + + messages = [ + PydanticMessage( + role=MessageRole.system, + content=[TextContent(type="text", text="You are a helpful assistant.")], + ) + ] + for i in range(10): + messages.append( + PydanticMessage( + role=MessageRole.user, + content=[TextContent(type="text", text=f"User message {i}: Test message {i}.")], + ) + ) + messages.append( + PydanticMessage( + role=MessageRole.assistant, + content=[TextContent(type="text", text=f"Assistant response {i}: Acknowledged message {i}.")], + ) + ) + + agent_state, in_context_messages = await create_agent_with_messages(server, actor, llm_config, messages) + + handle = llm_config.handle or f"{llm_config.model_endpoint_type}/{llm_config.model}" + agent_state.compaction_settings = CompactionSettings(model=handle, mode="self_compact_all") + + agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) + + # Mock self_summarize_all to always fail + with patch( + "letta.services.summarizer.compact.self_summarize_all", + new_callable=AsyncMock, + side_effect=RuntimeError("Simulated self_summarize_all failure"), + ): + summary_message, compacted_messages, summary_text = await agent_loop.compact(messages=in_context_messages) + + assert summary_message is not None + assert summary_text is not None + assert len(summary_text) > 0 + assert len(compacted_messages) < len(in_context_messages) + print(f"Fallback succeeded: {len(in_context_messages)} -> {len(compacted_messages)} messages") + + +# ============================================================================= +# CompactionStats 
tests +# ============================================================================= + + +def test_compaction_stats_embedding_in_packed_json(): + """Test that compaction_stats are correctly embedded in the packed JSON by package_summarize_message_no_counts.""" + from letta.system import package_summarize_message_no_counts + + stats = { + "trigger": "post_step_context_check", + "context_tokens_before": 50000, + "context_tokens_after": 15000, + "context_window": 128000, + "messages_count_before": 45, + "messages_count_after": 12, + } + + packed = package_summarize_message_no_counts( + summary="Test summary content", + timezone="UTC", + compaction_stats=stats, + ) + + # Parse the packed JSON + packed_json = json.loads(packed) + + # Verify structure + assert "type" in packed_json + assert packed_json["type"] == "system_alert" + assert "message" in packed_json + assert "Test summary content" in packed_json["message"] + assert "compaction_stats" in packed_json + + # Verify stats content + embedded_stats = packed_json["compaction_stats"] + assert embedded_stats["trigger"] == "post_step_context_check" + assert embedded_stats["context_tokens_before"] == 50000 + assert embedded_stats["context_tokens_after"] == 15000 + assert embedded_stats["context_window"] == 128000 + assert embedded_stats["messages_count_before"] == 45 + assert embedded_stats["messages_count_after"] == 12 + + +def test_compaction_stats_embedding_without_stats(): + """Test that packed JSON works correctly when no stats are provided.""" + from letta.system import package_summarize_message_no_counts + + packed = package_summarize_message_no_counts( + summary="Test summary content", + timezone="UTC", + compaction_stats=None, + ) + + packed_json = json.loads(packed) + + assert "type" in packed_json + assert "message" in packed_json + assert "compaction_stats" not in packed_json + + +def test_extract_compaction_stats_from_packed_json(): + """Test extracting CompactionStats from a packed JSON string.""" + from 
letta.schemas.letta_message import CompactionStats, extract_compaction_stats_from_packed_json + + packed_json = json.dumps( + { + "type": "system_alert", + "message": "Test summary", + "time": "2024-01-15T10:00:00", + "compaction_stats": { + "trigger": "context_window_exceeded", + "context_tokens_before": 100000, + "context_tokens_after": 30000, + "context_window": 128000, + "messages_count_before": 50, + "messages_count_after": 15, + }, + } + ) + + stats = extract_compaction_stats_from_packed_json(packed_json) + + assert stats is not None + assert isinstance(stats, CompactionStats) + assert stats.trigger == "context_window_exceeded" + assert stats.context_tokens_before == 100000 + assert stats.context_tokens_after == 30000 + assert stats.context_window == 128000 + assert stats.messages_count_before == 50 + assert stats.messages_count_after == 15 + + +def test_extract_compaction_stats_from_packed_json_without_stats(): + """Test that extraction returns None when no stats are present (backward compatibility).""" + from letta.schemas.letta_message import extract_compaction_stats_from_packed_json + + # Old format without compaction_stats + packed_json = json.dumps( + { + "type": "system_alert", + "message": "Test summary", + "time": "2024-01-15T10:00:00", + } + ) + + stats = extract_compaction_stats_from_packed_json(packed_json) + + assert stats is None + + +def test_extract_compaction_stats_from_packed_json_invalid_json(): + """Test that extraction handles invalid JSON gracefully.""" + from letta.schemas.letta_message import extract_compaction_stats_from_packed_json + + stats = extract_compaction_stats_from_packed_json("not valid json") + assert stats is None + + stats = extract_compaction_stats_from_packed_json("") + assert stats is None + + +def test_extract_compaction_stats_from_packed_json_invalid_stats(): + """Test that extraction handles invalid stats structure gracefully.""" + from letta.schemas.letta_message import extract_compaction_stats_from_packed_json + + 
# Missing required fields + packed_json = json.dumps( + { + "type": "system_alert", + "message": "Test summary", + "compaction_stats": { + "trigger": "test", + # Missing context_window, messages_count_before, messages_count_after + }, + } + ) + + stats = extract_compaction_stats_from_packed_json(packed_json) + assert stats is None # Should return None due to validation failure + + +def test_extract_compaction_stats_from_message(): + """Test extracting CompactionStats from a Message object.""" + from letta.agents.letta_agent_v3 import extract_compaction_stats_from_message + from letta.schemas.letta_message import CompactionStats + + packed_content = json.dumps( + { + "type": "system_alert", + "message": "Test summary", + "time": "2024-01-15T10:00:00", + "compaction_stats": { + "trigger": "post_step_context_check", + "context_tokens_before": 50000, + "context_tokens_after": 15000, + "context_window": 128000, + "messages_count_before": 45, + "messages_count_after": 12, + }, + } + ) + + message = PydanticMessage( + role=MessageRole.summary, + content=[TextContent(type="text", text=packed_content)], + ) + + stats = extract_compaction_stats_from_message(message) + + assert stats is not None + assert isinstance(stats, CompactionStats) + assert stats.trigger == "post_step_context_check" + assert stats.context_tokens_before == 50000 + assert stats.messages_count_after == 12 + + +def test_extract_compaction_stats_from_message_without_stats(): + """Test that Message extraction returns None when no stats are present.""" + from letta.agents.letta_agent_v3 import extract_compaction_stats_from_message + + packed_content = json.dumps( + { + "type": "system_alert", + "message": "Old format summary", + "time": "2024-01-15T10:00:00", + } + ) + + message = PydanticMessage( + role=MessageRole.summary, + content=[TextContent(type="text", text=packed_content)], + ) + + stats = extract_compaction_stats_from_message(message) + assert stats is None + + +def 
test_message_to_summary_message_with_stats(): + """Test that Message._convert_summary_message extracts compaction_stats.""" + from letta.schemas.letta_message import CompactionStats + + packed_content = json.dumps( + { + "type": "system_alert", + "message": "Summary of conversation", + "time": "2024-01-15T10:00:00", + "compaction_stats": { + "trigger": "context_window_exceeded", + "context_tokens_before": 80000, + "context_tokens_after": 25000, + "context_window": 128000, + "messages_count_before": 60, + "messages_count_after": 20, + }, + } + ) + + message = PydanticMessage( + role=MessageRole.summary, + content=[TextContent(type="text", text=packed_content)], + ) + + # Convert to SummaryMessage (as_user_message=False) + summary_msg = message._convert_summary_message(as_user_message=False) + + assert summary_msg.message_type == "summary_message" + assert summary_msg.compaction_stats is not None + assert isinstance(summary_msg.compaction_stats, CompactionStats) + assert summary_msg.compaction_stats.trigger == "context_window_exceeded" + assert summary_msg.compaction_stats.context_tokens_before == 80000 + + +def test_message_to_summary_message_backward_compatible(): + """Test that old messages without compaction_stats still convert correctly.""" + packed_content = json.dumps( + { + "type": "system_alert", + "message": "Old format summary without stats", + "time": "2024-01-15T10:00:00", + } + ) + + message = PydanticMessage( + role=MessageRole.summary, + content=[TextContent(type="text", text=packed_content)], + ) + + summary_msg = message._convert_summary_message(as_user_message=False) + + assert summary_msg.message_type == "summary_message" + assert summary_msg.compaction_stats is None # Should be None for old messages + assert "Old format summary" in summary_msg.summary + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "llm_config", + TESTED_LLM_CONFIGS, + ids=[c.model for c in TESTED_LLM_CONFIGS], +) +async def 
test_compact_with_stats_params_embeds_stats(server: SyncServer, actor, llm_config: LLMConfig): + """ + Integration test: compact() with trigger/context_tokens_before/messages_count_before + embeds compaction_stats in the packed message content. + """ + from letta.agents.letta_agent_v3 import extract_compaction_stats_from_message + + # Create a conversation with enough messages to summarize + messages = [ + PydanticMessage( + role=MessageRole.system, + content=[TextContent(type="text", text="You are a helpful assistant.")], + ) + ] + for i in range(10): + messages.append( + PydanticMessage( + role=MessageRole.user, + content=[TextContent(type="text", text=f"User message {i}")], + ) + ) + messages.append( + PydanticMessage( + role=MessageRole.assistant, + content=[TextContent(type="text", text=f"Response {i}")], + ) + ) + + agent_state, in_context_messages = await create_agent_with_messages(server, actor, llm_config, messages) + + handle = llm_config.handle or f"{llm_config.model_endpoint_type}/{llm_config.model}" + agent_state.compaction_settings = CompactionSettings(model=handle, mode="all") + + agent_loop = LettaAgentV3(agent_state=agent_state, actor=actor) + + # Call compact with stats params + summary_message_obj, compacted_messages, _summary_text = await agent_loop.compact( + messages=in_context_messages, + use_summary_role=True, + trigger="post_step_context_check", + context_tokens_before=50000, + messages_count_before=len(in_context_messages), + ) + + # Extract stats from the message + stats = extract_compaction_stats_from_message(summary_message_obj) + + assert stats is not None, "CompactionStats should be embedded in the message" + assert stats.trigger == "post_step_context_check" + assert stats.context_tokens_before == 50000 + assert stats.messages_count_before == len(in_context_messages) + assert stats.context_tokens_after is not None # Should be set by compact() + assert stats.messages_count_after == len(compacted_messages) # final_messages already 
includes summary + assert stats.context_window == llm_config.context_window + + +### basic self summarization + + +### fallback chain + +### basic self sliding window summarization + +### self sliding window preserves recent msgs + +### self mode return compaction stats diff --git a/tests/integration_test_system_prompt_prefix_caching.py b/tests/integration_test_system_prompt_prefix_caching.py new file mode 100644 index 00000000..de19aa03 --- /dev/null +++ b/tests/integration_test_system_prompt_prefix_caching.py @@ -0,0 +1,178 @@ +""" +Integration tests for system prompt prefix caching optimization. + +These tests verify that the system prompt is NOT rebuilt on every step, +only after compaction or message reset. This helps preserve prefix caching +for LLM providers. +""" + +import pytest +from letta_client import Letta + + +@pytest.fixture(scope="module") +def client(server_url: str) -> Letta: + """Creates and returns a synchronous Letta REST client for testing.""" + return Letta(base_url=server_url) + + +@pytest.fixture(scope="function") +def agent(client: Letta): + """Create a test agent and clean up after test.""" + agent_state = client.agents.create( + name="test-prefix-cache-agent", + include_base_tools=True, + model="openai/gpt-4o-mini", + embedding="openai/text-embedding-ada-002", + ) + yield agent_state + # Cleanup + try: + client.agents.delete(agent_state.id) + except Exception: + pass + + +class TestSystemPromptPrefixCaching: + """Test that system prompt stays stable during normal agent execution.""" + + def test_system_prompt_stable_after_memory_tool_and_messages(self, client: Letta, agent): + """ + Test workflow: + 1. Get initial system prompt and human block value + 2. Tell agent to update its memory block using the memory tool + 3. Verify block was modified but system prompt hasn't changed + 4. Send another message to the agent + 5. Verify system prompt still hasn't changed + 6. Manually update a block via API + 7. 
Send another message and verify system prompt still hasn't changed + (memory block changes are deferred to compaction) + """ + # Step 1: Get initial context window, system prompt, and human block value + initial_context = client.agents.context.retrieve(agent.id) + initial_system_prompt = initial_context.system_prompt + assert initial_system_prompt, "Initial system prompt should not be empty" + + # Get initial human block value + human_block = None + for block in agent.memory.blocks: + if block.label == "human": + human_block = block + break + assert human_block, "Agent should have a 'human' memory block" + initial_block_value = human_block.value + + # Step 2: Tell the agent to update its memory using the memory tool + response = client.agents.messages.create( + agent_id=agent.id, + messages=[ + { + "role": "user", + "content": "Please use the core_memory_append tool to add the following to your 'human' block: 'User likes pizza.'", + } + ], + ) + assert response.messages, "Agent should respond with messages" + + # Step 3: Verify block was modified but system prompt hasn't changed + # Check that the block was actually modified + updated_block = client.blocks.retrieve(human_block.id) + assert updated_block.value != initial_block_value, "Memory block should have been modified by the agent" + assert "pizza" in updated_block.value.lower(), "Memory block should contain the new content about pizza" + + # Verify system prompt hasn't changed + context_after_memory_update = client.agents.context.retrieve(agent.id) + system_prompt_after_memory = context_after_memory_update.system_prompt + assert system_prompt_after_memory == initial_system_prompt, ( + "System prompt should NOT change after agent uses memory tool (deferred to compaction)" + ) + + # Step 4: Send another message to the agent + response2 = client.agents.messages.create( + agent_id=agent.id, + messages=[ + { + "role": "user", + "content": "What is my favorite food?", + } + ], + ) + assert response2.messages, "Agent 
should respond with messages" + + # Step 5: Verify system prompt still hasn't changed + context_after_second_message = client.agents.context.retrieve(agent.id) + system_prompt_after_second = context_after_second_message.system_prompt + assert system_prompt_after_second == initial_system_prompt, "System prompt should remain stable after multiple messages" + + # Step 6: Manually update a block via the API + # Find the human block + human_block = None + for block in agent.memory.blocks: + if block.label == "human": + human_block = block + break + assert human_block, "Agent should have a 'human' memory block" + + # Update the block directly via API + client.blocks.modify( + block_id=human_block.id, + value=human_block.value + "\nUser also likes sushi.", + ) + + # Step 7: Send another message and verify system prompt still hasn't changed + response3 = client.agents.messages.create( + agent_id=agent.id, + messages=[ + { + "role": "user", + "content": "What foods do I like?", + } + ], + ) + assert response3.messages, "Agent should respond with messages" + + # Verify system prompt STILL hasn't changed (deferred to compaction/reset) + context_after_manual_update = client.agents.context.retrieve(agent.id) + system_prompt_after_manual = context_after_manual_update.system_prompt + assert system_prompt_after_manual == initial_system_prompt, ( + "System prompt should NOT change after manual block update (deferred to compaction)" + ) + + def test_system_prompt_updates_after_reset(self, client: Letta, agent): + """ + Test that system prompt IS updated after message reset. + 1. Get initial system prompt + 2. Manually update a memory block + 3. Reset messages + 4. 
Verify system prompt HAS changed to include the new memory + """ + # Step 1: Get initial system prompt + initial_context = client.agents.context.retrieve(agent.id) + initial_system_prompt = initial_context.system_prompt + + # Step 2: Manually update a block via the API + human_block = None + for block in agent.memory.blocks: + if block.label == "human": + human_block = block + break + assert human_block, "Agent should have a 'human' memory block" + + # Add distinctive text that we can verify in the system prompt + new_memory_content = "UNIQUE_TEST_MARKER_12345: User loves ice cream." + client.blocks.modify( + block_id=human_block.id, + value=human_block.value + f"\n{new_memory_content}", + ) + + # Step 3: Reset messages (this should trigger system prompt rebuild) + client.agents.messages.reset(agent.id) + + # Step 4: Verify system prompt HAS changed and includes the new memory + context_after_reset = client.agents.context.retrieve(agent.id) + system_prompt_after_reset = context_after_reset.system_prompt + + assert system_prompt_after_reset != initial_system_prompt, "System prompt SHOULD change after message reset" + assert "UNIQUE_TEST_MARKER_12345" in system_prompt_after_reset, ( + "System prompt should include the updated memory block content after reset" + ) diff --git a/tests/integration_test_turbopuffer.py b/tests/integration_test_turbopuffer.py index 31fa21be..d18874a4 100644 --- a/tests/integration_test_turbopuffer.py +++ b/tests/integration_test_turbopuffer.py @@ -45,7 +45,7 @@ async def sarah_agent(server, default_user): # Cleanup try: await server.agent_manager.delete_agent_async(agent.id, default_user) - except: + except Exception: pass @@ -151,7 +151,7 @@ async def wait_for_embedding( if any(msg["id"] == message_id for msg, _, _ in results): return True - except Exception as e: + except Exception: # Log but don't fail - Turbopuffer might still be processing pass @@ -347,7 +347,7 @@ async def test_turbopuffer_metadata_attributes(default_user, 
enable_turbopuffer) # Clean up on error try: await client.delete_all_passages(archive_id) - except: + except Exception: pass raise e @@ -409,7 +409,7 @@ async def test_hybrid_search_with_real_tpuf(default_user, enable_turbopuffer): ] # Create simple embeddings for testing (normally you'd use a real embedding model) - embeddings = [[float(i), float(i + 5), float(i + 10)] for i in range(len(texts))] + [[float(i), float(i + 5), float(i + 10)] for i in range(len(texts))] passage_ids = [f"passage-{str(uuid.uuid4())}" for _ in texts] # Insert passages @@ -487,7 +487,7 @@ async def test_hybrid_search_with_real_tpuf(default_user, enable_turbopuffer): # Clean up try: await client.delete_all_passages(archive_id) - except: + except Exception: pass @@ -522,7 +522,7 @@ async def test_tag_filtering_with_real_tpuf(default_user, enable_turbopuffer): ["javascript", "react"], ] - embeddings = [[float(i), float(i + 5), float(i + 10)] for i in range(len(texts))] + [[float(i), float(i + 5), float(i + 10)] for i in range(len(texts))] passage_ids = [f"passage-{str(uuid.uuid4())}" for _ in texts] # Insert passages with tags @@ -615,7 +615,7 @@ async def test_tag_filtering_with_real_tpuf(default_user, enable_turbopuffer): # Clean up try: await client.delete_all_passages(archive_id) - except: + except Exception: pass @@ -754,7 +754,7 @@ async def test_temporal_filtering_with_real_tpuf(default_user, enable_turbopuffe # Clean up try: await client.delete_all_passages(archive_id) - except: + except Exception: pass @@ -865,12 +865,6 @@ def test_message_text_extraction(server, default_user): agent_id="test-agent", ) text6 = manager._extract_message_text(msg6) - expected_parts = [ - "User said:", - 'Tool call: search({\n "query": "test"\n})', - "Tool result: Found 5 results", - "I should help the user", - ] assert ( text6 == '{"content": "User said: Tool call: search({\\n \\"query\\": \\"test\\"\\n}) Tool result: Found 5 results I should help the user"}' @@ -1112,7 +1106,7 @@ async def 
test_message_dual_write_with_real_tpuf(enable_message_embedding, defau created_ats = [datetime.now(timezone.utc) for _ in message_texts] # Generate embeddings (dummy for test) - embeddings = [[float(i), float(i + 1), float(i + 2)] for i in range(len(message_texts))] + [[float(i), float(i + 1), float(i + 2)] for i in range(len(message_texts))] # Insert messages into Turbopuffer success = await client.insert_messages( @@ -1144,7 +1138,7 @@ async def test_message_dual_write_with_real_tpuf(enable_message_embedding, defau # Clean up namespace try: await client.delete_all_messages(agent_id) - except: + except Exception: pass @@ -1205,7 +1199,7 @@ async def test_message_vector_search_with_real_tpuf(enable_message_embedding, de # Clean up namespace try: await client.delete_all_messages(agent_id) - except: + except Exception: pass @@ -1268,7 +1262,7 @@ async def test_message_hybrid_search_with_real_tpuf(enable_message_embedding, de # Clean up namespace try: await client.delete_all_messages(agent_id) - except: + except Exception: pass @@ -1340,7 +1334,7 @@ async def test_message_role_filtering_with_real_tpuf(enable_message_embedding, d # Clean up namespace try: await client.delete_all_messages(agent_id) - except: + except Exception: pass @@ -1357,7 +1351,7 @@ async def test_message_search_fallback_to_sql(server, default_user, sarah_agent) settings.embed_all_messages = False # Create messages - messages = await server.message_manager.create_many_messages_async( + await server.message_manager.create_many_messages_async( pydantic_msgs=[ PydanticMessage( role=MessageRole.user, @@ -1398,7 +1392,7 @@ async def test_message_update_reindexes_in_turbopuffer(server, default_user, sar """Test that updating a message properly deletes and re-inserts with new embedding in Turbopuffer""" from letta.schemas.message import MessageUpdate - embedding_config = sarah_agent.embedding_config or EmbeddingConfig.default_config(provider="openai") + sarah_agent.embedding_config or 
EmbeddingConfig.default_config(provider="openai") # Create initial message messages = await server.message_manager.create_many_messages_async( @@ -1493,8 +1487,6 @@ async def test_message_deletion_syncs_with_turbopuffer(server, default_user, ena actor=default_user, ) - embedding_config = agent_a.embedding_config - try: # Create 5 messages for agent A agent_a_messages = [] @@ -1597,7 +1589,7 @@ async def test_turbopuffer_failure_does_not_break_postgres(server, default_user, from letta.schemas.message import MessageUpdate - embedding_config = sarah_agent.embedding_config or EmbeddingConfig.default_config(provider="openai") + sarah_agent.embedding_config or EmbeddingConfig.default_config(provider="openai") # Create initial messages messages = await server.message_manager.create_many_messages_async( @@ -1668,7 +1660,7 @@ async def test_turbopuffer_failure_does_not_break_postgres(server, default_user, @pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured") async def test_message_creation_background_mode(server, default_user, sarah_agent, enable_message_embedding): """Test that messages are embedded in background when strict_mode=False""" - embedding_config = sarah_agent.embedding_config or EmbeddingConfig.default_config(provider="openai") + sarah_agent.embedding_config or EmbeddingConfig.default_config(provider="openai") # Create message in background mode messages = await server.message_manager.create_many_messages_async( @@ -1723,7 +1715,7 @@ async def test_message_update_background_mode(server, default_user, sarah_agent, """Test that message updates work in background mode""" from letta.schemas.message import MessageUpdate - embedding_config = sarah_agent.embedding_config or EmbeddingConfig.default_config(provider="openai") + sarah_agent.embedding_config or EmbeddingConfig.default_config(provider="openai") # Create initial message with strict_mode=True to ensure it's embedded messages = await 
server.message_manager.create_many_messages_async( @@ -1899,7 +1891,7 @@ async def test_message_date_filtering_with_real_tpuf(enable_message_embedding, d # Clean up namespace try: await client.delete_all_messages(agent_id) - except: + except Exception: pass @@ -2403,7 +2395,7 @@ async def test_query_messages_with_mixed_conversation_id_presence(enable_message async with AsyncTurbopuffer(api_key=client.api_key, region=client.region) as tpuf: namespace = tpuf.namespace(namespace_name) await namespace.delete_all() - except: + except Exception: pass @@ -2485,14 +2477,14 @@ async def test_query_messages_by_org_id_with_missing_conversation_id_schema(enab async with AsyncTurbopuffer(api_key=client.api_key, region=client.region) as tpuf: namespace = tpuf.namespace(namespace_name) await namespace.delete_all() - except: + except Exception: pass @pytest.mark.asyncio async def test_system_messages_not_embedded_during_agent_creation(server, default_user, enable_message_embedding): """Test that system messages are filtered out before being passed to the embedding pipeline during agent creation""" - from unittest.mock import AsyncMock, patch + from unittest.mock import patch from letta.schemas.agent import CreateAgent from letta.schemas.llm_config import LLMConfig @@ -2541,5 +2533,5 @@ async def test_system_messages_not_embedded_during_agent_creation(server, defaul # Clean up try: await server.agent_manager.delete_agent_async(agent.id, default_user) - except: + except Exception: pass diff --git a/tests/integration_test_typescript_tool_execution_sandbox.py b/tests/integration_test_typescript_tool_execution_sandbox.py index 97528214..7a39eea1 100644 --- a/tests/integration_test_typescript_tool_execution_sandbox.py +++ b/tests/integration_test_typescript_tool_execution_sandbox.py @@ -14,7 +14,6 @@ from letta.schemas.tool import Tool as PydanticTool, ToolCreate from letta.schemas.user import User from letta.server.server import SyncServer from letta.services.organization_manager 
import OrganizationManager -from letta.services.tool_executor.tool_execution_sandbox import ToolExecutionSandbox from letta.services.tool_manager import ToolManager from letta.services.tool_sandbox.e2b_sandbox import AsyncToolSandboxE2B from letta.services.user_manager import UserManager diff --git a/tests/integration_test_usage_tracking.py b/tests/integration_test_usage_tracking.py index 018fb312..f4b5098e 100644 --- a/tests/integration_test_usage_tracking.py +++ b/tests/integration_test_usage_tracking.py @@ -17,7 +17,7 @@ import json import logging import os import uuid -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, List, Optional, Tuple import pytest from dotenv import load_dotenv @@ -46,7 +46,7 @@ CACHE_TEST_CONFIGS = [ # OpenAI gpt-4o with prompt caching (Chat Completions API) ("openai/gpt-4o", {"provider_type": "openai"}), # Gemini 3 Pro Preview with context caching - ("google_ai/gemini-3-pro-preview", {"provider_type": "google_ai"}), + ("google_ai/gemini-3.1-pro-preview", {"provider_type": "google_ai"}), ] REASONING_TEST_CONFIGS = [ @@ -59,7 +59,7 @@ REASONING_TEST_CONFIGS = [ ("openai/gpt-5.1", {"provider_type": "openai", "reasoning": {"reasoning_effort": "low"}}), # Gemini 3 Pro Preview with thinking enabled ( - "google_ai/gemini-3-pro-preview", + "google_ai/gemini-3.1-pro-preview", {"provider_type": "google_ai", "thinking_config": {"include_thoughts": True, "thinking_budget": 1024}}, ), ] @@ -400,7 +400,7 @@ async def test_run_level_usage_aggregation( try: # Send multiple messages to create multiple steps - response1: Run = await async_client.agents.messages.send_message( + await async_client.agents.messages.send_message( agent_id=agent.id, messages=[MessageCreateParam(role="user", content="Message 1")], ) diff --git a/tests/managers/conftest.py b/tests/managers/conftest.py index 6e56cdcd..6fbd4c79 100644 --- a/tests/managers/conftest.py +++ b/tests/managers/conftest.py @@ -6,7 +6,6 @@ This conftest.py makes fixtures 
available to all test files in the tests/manager import os import time -import uuid from typing import Tuple import pytest @@ -23,7 +22,7 @@ from letta.schemas.agent import CreateAgent from letta.schemas.block import Block as PydanticBlock, CreateBlock from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import JobStatus, MessageRole, RunStatus -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate +from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate from letta.schemas.file import FileMetadata as PydanticFileMetadata from letta.schemas.job import BatchJob, Job as PydanticJob from letta.schemas.letta_message_content import TextContent @@ -498,7 +497,7 @@ async def sandbox_env_var_fixture(server: SyncServer, sandbox_config_fixture, de @pytest.fixture async def file_attachment(server: SyncServer, default_user, sarah_agent, default_file): """Create a file attachment to an agent.""" - assoc, closed_files = await server.file_agent_manager.attach_file( + assoc, _closed_files = await server.file_agent_manager.attach_file( agent_id=sarah_agent.id, file_id=default_file.id, file_name=default_file.file_name, diff --git a/tests/managers/test_agent_manager.py b/tests/managers/test_agent_manager.py index 4a9e8598..7a08c61b 100644 --- a/tests/managers/test_agent_manager.py +++ b/tests/managers/test_agent_manager.py @@ -1,19 +1,9 @@ -import json -import logging -import os -import random -import re -import string import time import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch +from datetime import datetime, timezone +from unittest.mock import patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures 
and constants from conftest from conftest import ( @@ -21,87 +11,33 @@ from conftest import ( DEFAULT_EMBEDDING_CONFIG, USING_SQLITE, ) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError -from letta.config import LettaConfig from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, MULTI_AGENT_TOOLS, ) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel +from letta.orm.file import FileContent as FileContentModel from letta.schemas.agent import CreateAgent, InternalTemplateAgentCreate, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock +from letta.schemas.block import CreateBlock from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, MessageRole, - 
ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, ) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem +from letta.schemas.letta_stop_reason import StopReasonType from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.model import ModelSettings -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate +from letta.schemas.message import MessageCreate +from letta.schemas.source import Source as PydanticSource from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate from letta.server.db import db_registry from 
letta.server.server import SyncServer -from letta.services.block_manager import BlockManager from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType from letta.services.summarizer.summarizer_config import CompactionSettings -from letta.settings import settings, tool_settings +from letta.settings import settings from letta.utils import calculate_file_defaults_based_on_context_window from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # ====================================================================================================================== # Helper Functions @@ -306,7 +242,7 @@ async def test_create_agent_with_model_handle_uses_correct_llm_config(server: Sy @pytest.mark.asyncio -async def test_compaction_settings_model_uses_separate_llm_config_for_summarization(default_user): +async def test_compaction_settings_model_uses_separate_llm_config_for_summarization(server: SyncServer, default_user): """When compaction_settings.model differs from the agent model, use a separate llm_config. This test exercises the summarization helpers directly to avoid external @@ -314,19 +250,21 @@ async def test_compaction_settings_model_uses_separate_llm_config_for_summarizat the LLMConfig used for the summarizer request. 
""" - from letta.agents.letta_agent_v3 import LettaAgentV3 from letta.schemas.agent import AgentState as PydanticAgentState from letta.schemas.enums import AgentType, MessageRole from letta.schemas.memory import Memory from letta.schemas.message import Message as PydanticMessage from letta.schemas.model import OpenAIModelSettings, OpenAIReasoning + from letta.services.summarizer.compact import build_summarizer_llm_config + + await server.init_async(init_with_default_org_and_user=True) # Base agent LLM config base_llm_config = LLMConfig.default_config("gpt-4o-mini") assert base_llm_config.model == "gpt-4o-mini" - # Configure compaction to use a different summarizer model - summarizer_handle = "openai/gpt-5-mini" + # Configure compaction to use a different summarizer model (!= default openai summarizer model) + summarizer_handle = "openai/gpt-5-nano" summarizer_model_settings = OpenAIModelSettings( max_output_tokens=1234, temperature=0.1, @@ -343,7 +281,7 @@ async def test_compaction_settings_model_uses_separate_llm_config_for_summarizat ) # Minimal message buffer: system + one user + one assistant - messages = [ + [ PydanticMessage( role=MessageRole.system, content=[TextContent(type="text", text="You are a helpful assistant.")], @@ -406,16 +344,11 @@ async def test_compaction_settings_model_uses_separate_llm_config_for_summarizat tool_rules=None, ) - # Create a mock agent instance to call the instance method - mock_agent = Mock(spec=LettaAgentV3) - mock_agent.actor = default_user - mock_agent.logger = Mock() - - # Use the instance method to derive summarizer llm_config - summarizer_llm_config = await LettaAgentV3._build_summarizer_llm_config( - mock_agent, + # Use the shared function to derive summarizer llm_config + summarizer_llm_config = await build_summarizer_llm_config( agent_llm_config=agent_state.llm_config, summarizer_config=agent_state.compaction_settings, + actor=default_user, ) # Agent model remains the base model @@ -423,12 +356,101 @@ async def 
test_compaction_settings_model_uses_separate_llm_config_for_summarizat # Summarizer config should use the handle/model from compaction_settings assert summarizer_llm_config.handle == summarizer_handle - assert summarizer_llm_config.model == "gpt-5-mini" + assert summarizer_llm_config.model == "gpt-5-nano" # And should reflect overrides from model_settings assert summarizer_llm_config.max_tokens == 1234 assert summarizer_llm_config.temperature == 0.1 +@pytest.mark.asyncio +async def test_create_agent_sets_default_compaction_model_anthropic(server: SyncServer, default_user): + """When no compaction_settings provided for Anthropic agent, default haiku model should be set.""" + from letta.schemas.agent import CreateAgent + + await server.init_async(init_with_default_org_and_user=True) + + # Upsert base tools + await server.tool_manager.upsert_base_tools_async(actor=default_user) + + # Create agent without compaction_settings using Anthropic LLM + agent = await server.create_agent_async( + CreateAgent( + name="test-default-compaction-anthropic", + model="anthropic/claude-sonnet-4-5-20250929", + # No compaction_settings + ), + actor=default_user, + ) + + # Should have default haiku model set + assert agent.compaction_settings is not None + assert agent.compaction_settings.model == "anthropic/claude-haiku-4-5-20251001" + + +@pytest.mark.asyncio +async def test_create_agent_sets_default_compaction_model_openai(server: SyncServer, default_user): + """When no compaction_settings provided for OpenAI agent, default gpt-5-mini model should be set.""" + from letta.schemas.agent import CreateAgent + + await server.init_async(init_with_default_org_and_user=True) + + # Upsert base tools + await server.tool_manager.upsert_base_tools_async(actor=default_user) + + # Create agent without compaction_settings using OpenAI LLM + agent = await server.create_agent_async( + CreateAgent( + name="test-default-compaction-openai", + model="openai/gpt-4o-mini", + # No compaction_settings + ), + 
actor=default_user, + ) + + # Should have default gpt-5-mini model set + assert agent.compaction_settings is not None + assert agent.compaction_settings.model == "openai/gpt-5-mini" + + +@pytest.mark.asyncio +async def test_create_agent_preserves_compaction_settings_when_model_set(server: SyncServer, default_user): + """When compaction_settings.model is already set, it should not be overwritten.""" + from letta.schemas.agent import CreateAgent + from letta.schemas.model import OpenAIModelSettings, OpenAIReasoning + from letta.services.summarizer.summarizer_config import CompactionSettings + + await server.init_async(init_with_default_org_and_user=True) + + # Upsert base tools + await server.tool_manager.upsert_base_tools_async(actor=default_user) + + summarizer_handle = "gpt-4o-mini" + + summarizer_config = CompactionSettings( + model=summarizer_handle, + model_settings=OpenAIModelSettings(max_output_tokens=1234, temperature=0.1, reasoning=OpenAIReasoning(reasoning_effort="high")), + prompt="You are a summarizer.", + clip_chars=2000, + mode="all", + sliding_window_percentage=0.3, + ) + + # Create agent with explicit compaction_settings model + agent = await server.create_agent_async( + CreateAgent( + name="test-preserve-compaction", + model="openai/gpt-5.2-codex", + compaction_settings=summarizer_config, + ), + actor=default_user, + ) + + # Should preserve the custom model, not override with gpt-5-mini default + assert agent.compaction_settings is not None + assert agent.compaction_settings.model == summarizer_handle + assert agent.compaction_settings.mode == "all" + + @pytest.mark.asyncio async def test_calculate_multi_agent_tools(set_letta_environment): """Test that calculate_multi_agent_tools excludes local-only tools in production.""" @@ -569,10 +591,10 @@ async def test_get_context_window_basic( server: SyncServer, comprehensive_test_agent_fixture, default_user, default_file, set_letta_environment ): # Test agent creation - created_agent, create_agent_request 
= comprehensive_test_agent_fixture + created_agent, _create_agent_request = comprehensive_test_agent_fixture # Attach a file - assoc, closed_files = await server.file_agent_manager.attach_file( + assoc, _closed_files = await server.file_agent_manager.attach_file( agent_id=created_agent.id, file_id=default_file.id, file_name=default_file.file_name, @@ -691,7 +713,7 @@ async def test_update_agent(server: SyncServer, comprehensive_test_agent_fixture system="train system", llm_config=LLMConfig.default_config("gpt-4o-mini"), embedding_config=EmbeddingConfig.default_config(model_name="letta"), - message_ids=["10", "20"], + message_ids=[f"message-{uuid.uuid4()}", f"message-{uuid.uuid4()}"], metadata={"train_key": "train_value"}, tool_exec_environment_variables={"test_env_var_key_a": "a", "new_tool_exec_key": "n"}, message_buffer_autoclear=False, @@ -756,9 +778,6 @@ async def test_update_agent_compaction_settings(server: SyncServer, comprehensiv """Test that an agent's compaction_settings can be updated""" agent, _ = comprehensive_test_agent_fixture - # Verify initial state (should be None or default) - assert agent.compaction_settings is None - # Create new compaction settings llm_config = LLMConfig.default_config("gpt-4o-mini") model_settings = llm_config._to_model_settings() @@ -948,7 +967,7 @@ async def test_update_agent_last_stop_reason(server: SyncServer, comprehensive_t @pytest.mark.asyncio async def test_list_agents_select_fields_empty(server: SyncServer, comprehensive_test_agent_fixture, default_user): # Create an agent using the comprehensive fixture. - created_agent, create_agent_request = comprehensive_test_agent_fixture + _created_agent, _create_agent_request = comprehensive_test_agent_fixture # List agents using an empty list for select_fields. 
agents = await server.agent_manager.list_agents_async(actor=default_user, include_relationships=[]) @@ -966,7 +985,7 @@ async def test_list_agents_select_fields_empty(server: SyncServer, comprehensive @pytest.mark.asyncio async def test_list_agents_select_fields_none(server: SyncServer, comprehensive_test_agent_fixture, default_user): # Create an agent using the comprehensive fixture. - created_agent, create_agent_request = comprehensive_test_agent_fixture + _created_agent, _create_agent_request = comprehensive_test_agent_fixture # List agents using an empty list for select_fields. agents = await server.agent_manager.list_agents_async(actor=default_user, include_relationships=None) @@ -983,7 +1002,7 @@ async def test_list_agents_select_fields_none(server: SyncServer, comprehensive_ @pytest.mark.asyncio async def test_list_agents_select_fields_specific(server: SyncServer, comprehensive_test_agent_fixture, default_user): - created_agent, create_agent_request = comprehensive_test_agent_fixture + _created_agent, _create_agent_request = comprehensive_test_agent_fixture # Choose a subset of valid relationship fields. valid_fields = ["tools", "tags"] @@ -1000,7 +1019,7 @@ async def test_list_agents_select_fields_specific(server: SyncServer, comprehens @pytest.mark.asyncio async def test_list_agents_select_fields_invalid(server: SyncServer, comprehensive_test_agent_fixture, default_user): - created_agent, create_agent_request = comprehensive_test_agent_fixture + _created_agent, _create_agent_request = comprehensive_test_agent_fixture # Provide field names that are not recognized. 
invalid_fields = ["foobar", "nonexistent_field"] @@ -1015,7 +1034,7 @@ async def test_list_agents_select_fields_invalid(server: SyncServer, comprehensi @pytest.mark.asyncio async def test_list_agents_select_fields_duplicates(server: SyncServer, comprehensive_test_agent_fixture, default_user): - created_agent, create_agent_request = comprehensive_test_agent_fixture + _created_agent, _create_agent_request = comprehensive_test_agent_fixture # Provide duplicate valid field names. duplicate_fields = ["tools", "tools", "tags", "tags"] @@ -1030,7 +1049,7 @@ async def test_list_agents_select_fields_duplicates(server: SyncServer, comprehe @pytest.mark.asyncio async def test_list_agents_select_fields_mixed(server: SyncServer, comprehensive_test_agent_fixture, default_user): - created_agent, create_agent_request = comprehensive_test_agent_fixture + _created_agent, _create_agent_request = comprehensive_test_agent_fixture # Mix valid fields with an invalid one. mixed_fields = ["tools", "invalid_field"] @@ -1047,7 +1066,7 @@ async def test_list_agents_select_fields_mixed(server: SyncServer, comprehensive @pytest.mark.asyncio async def test_list_agents_ascending(server: SyncServer, default_user): # Create two agents with known names - agent1 = await server.agent_manager.create_agent_async( + await server.agent_manager.create_agent_async( agent_create=CreateAgent( name="agent_oldest", agent_type="memgpt_v2_agent", @@ -1062,7 +1081,7 @@ async def test_list_agents_ascending(server: SyncServer, default_user): if USING_SQLITE: time.sleep(CREATE_DELAY_SQLITE) - agent2 = await server.agent_manager.create_agent_async( + await server.agent_manager.create_agent_async( agent_create=CreateAgent( name="agent_newest", agent_type="memgpt_v2_agent", @@ -1082,7 +1101,7 @@ async def test_list_agents_ascending(server: SyncServer, default_user): @pytest.mark.asyncio async def test_list_agents_descending(server: SyncServer, default_user): # Create two agents with known names - agent1 = await 
server.agent_manager.create_agent_async( + await server.agent_manager.create_agent_async( agent_create=CreateAgent( name="agent_oldest", agent_type="memgpt_v2_agent", @@ -1097,7 +1116,7 @@ async def test_list_agents_descending(server: SyncServer, default_user): if USING_SQLITE: time.sleep(CREATE_DELAY_SQLITE) - agent2 = await server.agent_manager.create_agent_async( + await server.agent_manager.create_agent_async( agent_create=CreateAgent( name="agent_newest", agent_type="memgpt_v2_agent", @@ -1153,7 +1172,7 @@ async def test_list_agents_by_last_stop_reason(server: SyncServer, default_user) ) # Create agent with no stop reason - agent3 = await server.agent_manager.create_agent_async( + await server.agent_manager.create_agent_async( agent_create=CreateAgent( name="agent_no_stop_reason", agent_type="memgpt_v2_agent", @@ -1241,7 +1260,7 @@ async def test_count_agents_with_filters(server: SyncServer, default_user): actor=default_user, ) - agent4 = await server.agent_manager.create_agent_async( + await server.agent_manager.create_agent_async( agent_create=CreateAgent( name="agent_no_stop_reason", agent_type="memgpt_v2_agent", @@ -1584,7 +1603,6 @@ async def test_agent_state_schema_unchanged(server: SyncServer): from letta.schemas.response_format import ResponseFormatUnion from letta.schemas.source import Source from letta.schemas.tool import Tool - from letta.schemas.tool_rule import ToolRule from letta.services.summarizer.summarizer_config import CompactionSettings # Define the expected schema structure @@ -1743,7 +1761,7 @@ async def test_agent_state_schema_unchanged(server: SyncServer): # Validate nested object schemas # Memory schema memory_fields = Memory.model_fields - expected_memory_fields = {"agent_type", "blocks", "file_blocks", "prompt_template"} + expected_memory_fields = {"agent_type", "git_enabled", "blocks", "file_blocks", "prompt_template"} actual_memory_fields = set(memory_fields.keys()) if actual_memory_fields != expected_memory_fields: pytest.fail( @@ 
-1858,6 +1876,9 @@ async def test_agent_state_schema_unchanged(server: SyncServer): "tier", "parallel_tool_calls", "strict", + "return_logprobs", + "top_logprobs", + "return_token_ids", } actual_llm_config_fields = set(llm_config_fields.keys()) if actual_llm_config_fields != expected_llm_config_fields: @@ -1985,6 +2006,7 @@ async def test_agent_state_relationship_loads(server: SyncServer, default_user, assert not agent_state.tools # Test include_relationships override with specific relationships + # Note: tags are always loaded alongside memory (needed for git_enabled) agent_state = await server.agent_manager.get_agent_by_id_async( agent_id=created_agent.id, actor=default_user, @@ -1992,7 +2014,7 @@ async def test_agent_state_relationship_loads(server: SyncServer, default_user, ) assert agent_state.blocks assert agent_state.sources - assert not agent_state.tags + assert agent_state.tags # tags loaded with memory for git_enabled assert not agent_state.tools # Test include override with specific relationships @@ -2004,7 +2026,7 @@ async def test_agent_state_relationship_loads(server: SyncServer, default_user, ) assert agent_state.blocks assert agent_state.sources - assert not agent_state.tags + assert agent_state.tags # tags loaded with blocks for git_enabled assert not agent_state.tools @@ -2029,14 +2051,14 @@ async def test_create_template_agent_with_files_from_sources(server: SyncServer, organization_id=default_user.organization_id, source_id=source.id, ) - file1 = await server.file_manager.create_file(file_metadata=file1_metadata, actor=default_user, text="content for file 1") + await server.file_manager.create_file(file_metadata=file1_metadata, actor=default_user, text="content for file 1") file2_metadata = PydanticFileMetadata( file_name="template_file_2.txt", organization_id=default_user.organization_id, source_id=source.id, ) - file2 = await server.file_manager.create_file(file_metadata=file2_metadata, actor=default_user, text="content for file 2") + await 
server.file_manager.create_file(file_metadata=file2_metadata, actor=default_user, text="content for file 2") # Create agent using InternalTemplateAgentCreate with the source create_agent_request = InternalTemplateAgentCreate( diff --git a/tests/managers/test_agent_tag_manager.py b/tests/managers/test_agent_tag_manager.py index de67136c..d0c9dd75 100644 --- a/tests/managers/test_agent_tag_manager.py +++ b/tests/managers/test_agent_tag_manager.py @@ -1,106 +1,20 @@ import asyncio -import json -import logging -import os -import random -import re -import string import time -import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest from conftest import ( CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, USING_SQLITE, ) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from 
letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, -) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, 
OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry +from letta.schemas.organization import Organization as PydanticOrganization +from letta.schemas.user import User as PydanticUser from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # ====================================================================================================================== # AgentManager Tests - Tags Relationship diff --git a/tests/managers/test_archive_manager.py b/tests/managers/test_archive_manager.py index 33241f08..d61c337c 100644 --- a/tests/managers/test_archive_manager.py +++ b/tests/managers/test_archive_manager.py @@ -1,105 +1,19 @@ -import json -import logging -import os -import random -import re -import string -import time import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch import pytest -from _pytest.python_api import approx -from 
anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest from conftest import ( - CREATE_DELAY_SQLITE, DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, ) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import AgentRelationships, AgentState, CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock +from letta.orm.errors import NoResultFound +from letta.schemas.agent import CreateAgent from letta.schemas.embedding_config import 
EmbeddingConfig -from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, -) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry from letta.server.server import SyncServer -from 
letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # ====================================================================================================================== diff --git a/tests/managers/test_block_manager.py b/tests/managers/test_block_manager.py index b815428a..8f6408cf 100644 --- a/tests/managers/test_block_manager.py +++ b/tests/managers/test_block_manager.py @@ -1,104 +1,33 @@ -import json import logging -import os import random -import re import string import time import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest from conftest import ( CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, USING_SQLITE, ) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError from sqlalchemy.orm.exc import StaleDataError -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - 
LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError, LettaInvalidArgumentError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block +from letta.errors import LettaInvalidArgumentError +from letta.orm import Block from letta.orm.block_history import BlockHistory from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock +from letta.schemas.agent import CreateAgent +from letta.schemas.block import Block as PydanticBlock, BlockUpdate from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ( ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, ) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, 
UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate +from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.server.server import SyncServer from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview from tests.utils import random_string # ====================================================================================================================== diff --git a/tests/managers/test_cancellation.py b/tests/managers/test_cancellation.py 
index 804fd4ee..1aca3f8c 100644 --- a/tests/managers/test_cancellation.py +++ b/tests/managers/test_cancellation.py @@ -6,8 +6,6 @@ points in the agent execution flow, covering all the issues documented in CANCEL """ import asyncio -from typing import AsyncGenerator -from unittest.mock import AsyncMock, MagicMock, patch import pytest @@ -19,8 +17,7 @@ from letta.schemas.enums import MessageRole, RunStatus from letta.schemas.letta_request import LettaStreamingRequest from letta.schemas.llm_config import LLMConfig from letta.schemas.message import MessageCreate -from letta.schemas.model import ModelSettings -from letta.schemas.run import Run as PydanticRun, RunUpdate +from letta.schemas.run import Run as PydanticRun from letta.server.server import SyncServer from letta.services.streaming_service import StreamingService @@ -323,7 +320,7 @@ class TestMessageStateDesyncIssues: print(f" background={request.background}") # Start the background streaming agent - run, stream_response = await streaming_service.create_agent_stream( + run, _stream_response = await streaming_service.create_agent_stream( agent_id=test_agent_with_tool.id, actor=default_user, request=request, @@ -513,7 +510,7 @@ class TestStreamingCancellation: try: async for chunk in cancel_during_stream(): chunks.append(chunk) - except Exception as e: + except Exception: # May raise exception on cancellation pass @@ -736,7 +733,7 @@ class TestResourceCleanupAfterCancellation: input_messages = [MessageCreate(role=MessageRole.user, content="Call print_tool with 'test'")] - result = await agent_loop.step( + await agent_loop.step( input_messages=input_messages, max_steps=5, run_id=test_run.id, @@ -898,7 +895,7 @@ class TestApprovalFlowCancellation: ) # Check for approval request messages - approval_messages = [m for m in messages_after_cancel if m.role == "approval_request"] + [m for m in messages_after_cancel if m.role == "approval_request"] # Second run: try to execute normally (should work, not stuck in approval) 
test_run_2 = await server.run_manager.create_run( @@ -1078,7 +1075,7 @@ class TestApprovalFlowCancellation: assert result.stop_reason.stop_reason == "requires_approval", f"Expected requires_approval, got {result.stop_reason.stop_reason}" # Get all messages from database for this run - db_messages = await server.message_manager.list_messages( + await server.message_manager.list_messages( actor=default_user, agent_id=test_agent_with_tool.id, run_id=test_run.id, @@ -1213,7 +1210,7 @@ class TestApprovalFlowCancellation: assert result.stop_reason.stop_reason == "requires_approval", f"Should stop for approval, got {result.stop_reason.stop_reason}" # Get the approval request message to see how many tool calls were made - db_messages_before_cancel = await server.message_manager.list_messages( + await server.message_manager.list_messages( actor=default_user, agent_id=agent_state.id, run_id=test_run.id, diff --git a/tests/managers/test_conversation_manager.py b/tests/managers/test_conversation_manager.py index 03329a9a..8bf2d5f8 100644 --- a/tests/managers/test_conversation_manager.py +++ b/tests/managers/test_conversation_manager.py @@ -166,6 +166,110 @@ async def test_delete_conversation(conversation_manager, server: SyncServer, sar ) +@pytest.mark.asyncio +async def test_delete_conversation_removes_from_list(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test that soft-deleted conversations are excluded from list results.""" + # Create two conversations + conv1 = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="Keep me"), + actor=default_user, + ) + conv2 = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="Delete me"), + actor=default_user, + ) + + # Delete one + await conversation_manager.delete_conversation( + conversation_id=conv2.id, + actor=default_user, + ) + + # List should only return the 
non-deleted conversation + conversations = await conversation_manager.list_conversations( + agent_id=sarah_agent.id, + actor=default_user, + ) + conv_ids = [c.id for c in conversations] + assert conv1.id in conv_ids + assert conv2.id not in conv_ids + + +@pytest.mark.asyncio +async def test_delete_conversation_double_delete_raises(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test that deleting an already-deleted conversation raises NoResultFound.""" + created = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="Delete me twice"), + actor=default_user, + ) + + await conversation_manager.delete_conversation( + conversation_id=created.id, + actor=default_user, + ) + + # Second delete should raise + with pytest.raises(NoResultFound): + await conversation_manager.delete_conversation( + conversation_id=created.id, + actor=default_user, + ) + + +@pytest.mark.asyncio +async def test_update_deleted_conversation_raises(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test that updating a soft-deleted conversation raises NoResultFound.""" + created = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="Original"), + actor=default_user, + ) + + await conversation_manager.delete_conversation( + conversation_id=created.id, + actor=default_user, + ) + + with pytest.raises(NoResultFound): + await conversation_manager.update_conversation( + conversation_id=created.id, + conversation_update=UpdateConversation(summary="Should fail"), + actor=default_user, + ) + + +@pytest.mark.asyncio +async def test_delete_conversation_excluded_from_summary_search(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test that soft-deleted conversations are excluded from summary search results.""" + await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + 
conversation_create=CreateConversation(summary="alpha search term"), + actor=default_user, + ) + to_delete = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="alpha deleted term"), + actor=default_user, + ) + + await conversation_manager.delete_conversation( + conversation_id=to_delete.id, + actor=default_user, + ) + + results = await conversation_manager.list_conversations( + agent_id=sarah_agent.id, + actor=default_user, + summary_search="alpha", + ) + result_ids = [c.id for c in results] + assert to_delete.id not in result_ids + assert len(results) == 1 + + @pytest.mark.asyncio async def test_conversation_isolation_by_agent(conversation_manager, server: SyncServer, sarah_agent, charles_agent, default_user): """Test that conversations are isolated by agent.""" @@ -1037,3 +1141,173 @@ async def test_list_conversation_messages_order_with_pagination(conversation_man assert "Message 0" in page_asc[0].content # In descending, first should be "Message 4" assert "Message 4" in page_desc[0].content + + +# ====================================================================================================================== +# Model/Model Settings Override Tests +# ====================================================================================================================== + + +@pytest.mark.asyncio +async def test_create_conversation_with_model(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test creating a conversation with a model override.""" + conversation = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="Test with model override", model="openai/gpt-4o"), + actor=default_user, + ) + + assert conversation.id is not None + assert conversation.model == "openai/gpt-4o" + assert conversation.model_settings is None + + +@pytest.mark.asyncio +async def 
test_create_conversation_with_model_and_settings(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test creating a conversation with model and model_settings.""" + from letta.schemas.model import OpenAIModelSettings + + settings = OpenAIModelSettings(temperature=0.5) + conversation = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation( + summary="Test with settings", + model="openai/gpt-4o", + model_settings=settings, + ), + actor=default_user, + ) + + assert conversation.model == "openai/gpt-4o" + assert conversation.model_settings is not None + assert conversation.model_settings.temperature == 0.5 + + +@pytest.mark.asyncio +async def test_create_conversation_without_model_override(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test creating a conversation without model override returns None for model fields.""" + conversation = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="No override"), + actor=default_user, + ) + + assert conversation.id is not None + assert conversation.model is None + assert conversation.model_settings is None + + +@pytest.mark.asyncio +async def test_update_conversation_set_model(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test updating a conversation to add a model override.""" + # Create without override + conversation = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="Original"), + actor=default_user, + ) + assert conversation.model is None + + # Update to add override + updated = await conversation_manager.update_conversation( + conversation_id=conversation.id, + conversation_update=UpdateConversation(model="anthropic/claude-3-opus"), + actor=default_user, + ) + + assert updated.model == "anthropic/claude-3-opus" + + +@pytest.mark.asyncio 
+async def test_update_conversation_preserves_model(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test that updating summary preserves existing model override.""" + # Create with override + conversation = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="With override", model="openai/gpt-4o"), + actor=default_user, + ) + assert conversation.model == "openai/gpt-4o" + + # Update summary only + updated = await conversation_manager.update_conversation( + conversation_id=conversation.id, + conversation_update=UpdateConversation(summary="New summary"), + actor=default_user, + ) + + assert updated.summary == "New summary" + assert updated.model == "openai/gpt-4o" + + +@pytest.mark.asyncio +async def test_retrieve_conversation_includes_model(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test that retrieving a conversation includes model/model_settings.""" + from letta.schemas.model import OpenAIModelSettings + + created = await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation( + summary="Retrieve test", + model="openai/gpt-4o", + model_settings=OpenAIModelSettings(temperature=0.7), + ), + actor=default_user, + ) + + retrieved = await conversation_manager.get_conversation_by_id( + conversation_id=created.id, + actor=default_user, + ) + + assert retrieved.model == "openai/gpt-4o" + assert retrieved.model_settings is not None + assert retrieved.model_settings.temperature == 0.7 + + +@pytest.mark.asyncio +async def test_list_conversations_includes_model(conversation_manager, server: SyncServer, sarah_agent, default_user): + """Test that listing conversations includes model fields.""" + await conversation_manager.create_conversation( + agent_id=sarah_agent.id, + conversation_create=CreateConversation(summary="List test", model="openai/gpt-4o"), + actor=default_user, + ) + + 
conversations = await conversation_manager.list_conversations( + agent_id=sarah_agent.id, + actor=default_user, + ) + + assert len(conversations) >= 1 + conv_with_model = [c for c in conversations if c.summary == "List test"] + assert len(conv_with_model) == 1 + assert conv_with_model[0].model == "openai/gpt-4o" + + +@pytest.mark.asyncio +async def test_create_conversation_schema_model_validation(): + """Test that CreateConversation validates model handle format.""" + from letta.errors import LettaInvalidArgumentError + + # Valid format should work + create = CreateConversation(model="openai/gpt-4o") + assert create.model == "openai/gpt-4o" + + # Invalid format should raise + with pytest.raises(LettaInvalidArgumentError): + CreateConversation(model="invalid-no-slash") + + +@pytest.mark.asyncio +async def test_update_conversation_schema_model_validation(): + """Test that UpdateConversation validates model handle format.""" + from letta.errors import LettaInvalidArgumentError + + # Valid format should work + update = UpdateConversation(model="anthropic/claude-3-opus") + assert update.model == "anthropic/claude-3-opus" + + # Invalid format should raise + with pytest.raises(LettaInvalidArgumentError): + UpdateConversation(model="no-slash") diff --git a/tests/managers/test_file_manager.py b/tests/managers/test_file_manager.py index 7a1284b8..79760a12 100644 --- a/tests/managers/test_file_manager.py +++ b/tests/managers/test_file_manager.py @@ -1,106 +1,15 @@ import asyncio -import json -import logging -import os -import random -import re -import string import time -import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest from 
conftest import ( CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, USING_SQLITE, ) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock -from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, -) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, 
SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem -from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry -from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import 
calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string +from letta.schemas.file import FileMetadata as PydanticFileMetadata # ====================================================================================================================== # FileAgent Tests @@ -109,7 +18,7 @@ from tests.utils import random_string @pytest.mark.asyncio async def test_attach_creates_association(server, default_user, sarah_agent, default_file): - assoc, closed_files = await server.file_agent_manager.attach_file( + assoc, _closed_files = await server.file_agent_manager.attach_file( agent_id=sarah_agent.id, file_id=default_file.id, file_name=default_file.file_name, @@ -131,7 +40,7 @@ async def test_attach_creates_association(server, default_user, sarah_agent, def async def test_attach_is_idempotent(server, default_user, sarah_agent, default_file): - a1, closed_files = await server.file_agent_manager.attach_file( + a1, _closed_files = await server.file_agent_manager.attach_file( agent_id=sarah_agent.id, file_id=default_file.id, file_name=default_file.file_name, @@ -142,7 +51,7 @@ async def test_attach_is_idempotent(server, default_user, sarah_agent, default_f ) # second attach with different params - a2, closed_files = await server.file_agent_manager.attach_file( + a2, _closed_files = await server.file_agent_manager.attach_file( agent_id=sarah_agent.id, file_id=default_file.id, file_name=default_file.file_name, @@ -205,7 +114,7 @@ async def test_file_agent_line_tracking(server, default_user, sarah_agent, defau file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text=test_content) # Test opening with line range using enforce_max_open_files_and_open - closed_files, was_already_open, previous_ranges = await server.file_agent_manager.enforce_max_open_files_and_open( + _closed_files, _was_already_open, previous_ranges = 
await server.file_agent_manager.enforce_max_open_files_and_open( agent_id=sarah_agent.id, file_id=file.id, file_name=file.file_name, @@ -229,7 +138,7 @@ async def test_file_agent_line_tracking(server, default_user, sarah_agent, defau assert previous_ranges == {} # No previous range since it wasn't open before # Test opening without line range - should clear line info and capture previous range - closed_files, was_already_open, previous_ranges = await server.file_agent_manager.enforce_max_open_files_and_open( + _closed_files, _was_already_open, previous_ranges = await server.file_agent_manager.enforce_max_open_files_and_open( agent_id=sarah_agent.id, file_id=file.id, file_name=file.file_name, @@ -412,7 +321,7 @@ async def test_list_files_for_agent_paginated_filter_open( ) # get only open files - open_files, cursor, has_more = await server.file_agent_manager.list_files_for_agent_paginated( + open_files, _cursor, has_more = await server.file_agent_manager.list_files_for_agent_paginated( agent_id=sarah_agent.id, actor=default_user, is_open=True, @@ -461,7 +370,7 @@ async def test_list_files_for_agent_paginated_filter_closed( assert all(not fa.is_open for fa in page1) # get second page of closed files - page2, cursor2, has_more2 = await server.file_agent_manager.list_files_for_agent_paginated( + page2, _cursor2, has_more2 = await server.file_agent_manager.list_files_for_agent_paginated( agent_id=sarah_agent.id, actor=default_user, is_open=False, @@ -677,7 +586,7 @@ async def test_mark_access_bulk(server, default_user, sarah_agent, default_sourc # Attach all files (they'll be open by default) attached_files = [] for file in files: - file_agent, closed_files = await server.file_agent_manager.attach_file( + file_agent, _closed_files = await server.file_agent_manager.attach_file( agent_id=sarah_agent.id, file_id=file.id, file_name=file.file_name, @@ -836,7 +745,7 @@ async def test_lru_eviction_on_open_file(server, default_user, sarah_agent, defa time.sleep(0.1) # Now "open" 
the last file using the efficient method - closed_files, was_already_open, _ = await server.file_agent_manager.enforce_max_open_files_and_open( + closed_files, _was_already_open, _ = await server.file_agent_manager.enforce_max_open_files_and_open( agent_id=sarah_agent.id, file_id=files[-1].id, file_name=files[-1].file_name, @@ -944,7 +853,7 @@ async def test_last_accessed_at_updates_correctly(server, default_user, sarah_ag ) file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text="test content") - file_agent, closed_files = await server.file_agent_manager.attach_file( + file_agent, _closed_files = await server.file_agent_manager.attach_file( agent_id=sarah_agent.id, file_id=file.id, file_name=file.file_name, @@ -1048,7 +957,7 @@ async def test_attach_files_bulk_deduplication(server, default_user, sarah_agent visible_content_map = {"duplicate_test.txt": "visible content"} # Bulk attach should deduplicate - closed_files = await server.file_agent_manager.attach_files_bulk( + await server.file_agent_manager.attach_files_bulk( agent_id=sarah_agent.id, files_metadata=files_to_attach, visible_content_map=visible_content_map, @@ -1176,7 +1085,7 @@ async def test_attach_files_bulk_mixed_existing_new(server, default_user, sarah_ new_files.append(file) # Bulk attach: existing file + new files - files_to_attach = [existing_file] + new_files + files_to_attach = [existing_file, *new_files] visible_content_map = { "existing_file.txt": "updated content", "new_file_0.txt": "new content 0", diff --git a/tests/managers/test_group_manager.py b/tests/managers/test_group_manager.py index 47a94df3..d6f59d5a 100644 --- a/tests/managers/test_group_manager.py +++ b/tests/managers/test_group_manager.py @@ -1,105 +1,9 @@ -import json -import logging -import os -import random -import re -import string -import time -import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch 
- import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest -from conftest import ( - CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, -) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError - -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import ( - 
ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, -) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from 
letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string @pytest.mark.asyncio diff --git a/tests/managers/test_identity_manager.py b/tests/managers/test_identity_manager.py index 9534e306..beca8049 100644 --- a/tests/managers/test_identity_manager.py +++ b/tests/managers/test_identity_manager.py @@ -1,105 +1,14 @@ -import json -import logging -import os -import random -import re -import string -import time -import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch - import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest -from conftest import ( - CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, -) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError - -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from 
letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel +from letta.orm.errors import UniqueConstraintViolationError from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock +from letta.schemas.block import Block as PydanticBlock from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, -) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import 
AgentStepState, LLMBatchItem from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry from letta.server.server import SyncServer from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # ====================================================================================================================== # Identity Manager Tests @@ -397,31 +306,3 @@ async def test_get_set_blocks_for_identities(server: SyncServer, default_block, assert block_without_identity.id not in block_ids await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user) - - -async def test_upsert_properties(server: SyncServer, default_user): - identity_create = IdentityCreate( 
- identifier_key="1234", - name="caren", - identity_type=IdentityType.user, - properties=[ - IdentityProperty(key="email", value="caren@letta.com", type=IdentityPropertyType.string), - IdentityProperty(key="age", value=28, type=IdentityPropertyType.number), - ], - ) - - identity = await server.identity_manager.create_identity_async(identity_create, actor=default_user) - properties = [ - IdentityProperty(key="email", value="caren@gmail.com", type=IdentityPropertyType.string), - IdentityProperty(key="age", value="28", type=IdentityPropertyType.string), - IdentityProperty(key="test", value=123, type=IdentityPropertyType.number), - ] - - updated_identity = await server.identity_manager.upsert_identity_properties_async( - identity_id=identity.id, - properties=properties, - actor=default_user, - ) - assert updated_identity.properties == properties - - await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user) diff --git a/tests/managers/test_job_manager.py b/tests/managers/test_job_manager.py index 9c0d8d59..aa55a0cb 100644 --- a/tests/managers/test_job_manager.py +++ b/tests/managers/test_job_manager.py @@ -1,105 +1,15 @@ -import json -import logging -import os -import random -import re -import string -import time -import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch +from datetime import datetime import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest -from conftest import ( - CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, -) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc 
import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError - -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError, LettaInvalidArgumentError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock -from letta.schemas.embedding_config import EmbeddingConfig +from letta.errors import LettaInvalidArgumentError from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, JobStatus, JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, ) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, 
IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem -from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry +from letta.schemas.job import Job as PydanticJob, JobUpdate from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import 
random_string # ====================================================================================================================== # JobManager Tests diff --git a/tests/managers/test_mcp_manager.py b/tests/managers/test_mcp_manager.py index ba36820d..fb27404b 100644 --- a/tests/managers/test_mcp_manager.py +++ b/tests/managers/test_mcp_manager.py @@ -1,105 +1,18 @@ -import json -import logging -import os -import random -import re -import string -import time import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch +from unittest.mock import AsyncMock, patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest -from conftest import ( - CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, -) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError - -from letta.config import LettaConfig from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, ) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver 
-from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock -from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, ToolType, - VectorDBProvider, ) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem -from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from 
letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate from letta.server.db import db_registry -from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string +from letta.settings import settings # ====================================================================================================================== # MCPManager Tests @@ -158,7 +71,7 @@ async def test_create_mcp_server(mock_get_client, server, default_user): # Test with a valid SSEServerConfig mcp_server_name = "coingecko" server_url = "https://mcp.api.coingecko.com/sse" - sse_mcp_config = SSEServerConfig(server_name=mcp_server_name, server_url=server_url) + SSEServerConfig(server_name=mcp_server_name, server_url=server_url) mcp_sse_server = MCPServer(server_name=mcp_server_name, server_type=MCPServerType.SSE, server_url=server_url) created_server = await server.mcp_manager.create_or_update_mcp_server(mcp_sse_server, actor=default_user) print(created_server) @@ -200,7 +113,7 @@ async def test_create_mcp_server(mock_get_client, server, default_user): async def 
test_create_mcp_server_with_tools(mock_get_client, server, default_user): """Test that creating an MCP server automatically syncs and persists its tools.""" from letta.functions.mcp_client.types import MCPToolHealth - from letta.schemas.mcp import MCPServer, MCPServerType, SSEServerConfig + from letta.schemas.mcp import MCPServer, MCPServerType from letta.settings import tool_settings if tool_settings.mcp_read_from_config: @@ -795,7 +708,7 @@ async def test_mcp_server_delete_removes_all_sessions_for_url_and_user(server, d @pytest.mark.asyncio async def test_mcp_server_resync_tools(server, default_user, default_organization): """Test that resyncing MCP server tools correctly handles added, deleted, and updated tools.""" - from unittest.mock import AsyncMock, MagicMock, patch + from unittest.mock import AsyncMock, patch from letta.functions.mcp_client.types import MCPTool, MCPToolHealth from letta.schemas.mcp import MCPServer as PydanticMCPServer, MCPServerType @@ -884,7 +797,7 @@ async def test_mcp_server_resync_tools(server, default_user, default_organizatio # Verify tool2 was actually deleted try: - deleted_tool = await server.tool_manager.get_tool_by_id_async(tool_id=tool2.id, actor=default_user) + await server.tool_manager.get_tool_by_id_async(tool_id=tool2.id, actor=default_user) assert False, "Tool2 should have been deleted" except Exception: pass # Expected - tool should be deleted diff --git a/tests/managers/test_message_manager.py b/tests/managers/test_message_manager.py index 8bea9347..6a689ec7 100644 --- a/tests/managers/test_message_manager.py +++ b/tests/managers/test_message_manager.py @@ -1,105 +1,18 @@ -import json -import logging -import os -import random -import re -import string -import time import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from 
anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest -from conftest import ( - CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, -) from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock -from letta.schemas.embedding_config import EmbeddingConfig +from letta.orm.errors import UniqueConstraintViolationError from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - 
FileProcessingStatus, - JobStatus, - JobType, MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, ) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem -from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry +from letta.schemas.message import Message as PydanticMessage, MessageUpdate from letta.server.server import SyncServer -from letta.services.block_manager 
import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # ====================================================================================================================== # AgentManager Tests - Messages Relationship @@ -301,10 +214,10 @@ async def test_modify_letta_message(server: SyncServer, sarah_agent, default_use messages = await server.message_manager.list_messages(agent_id=sarah_agent.id, actor=default_user) letta_messages = PydanticMessage.to_letta_messages_from_list(messages=messages) - system_message = [msg for msg in letta_messages if msg.message_type == "system_message"][0] - assistant_message = [msg for msg in letta_messages if msg.message_type == "assistant_message"][0] - user_message = [msg for msg in letta_messages if msg.message_type == "user_message"][0] - reasoning_message = [msg for msg in letta_messages if msg.message_type == "reasoning_message"][0] + system_message = next(msg for msg in letta_messages if msg.message_type == "system_message") + assistant_message = next(msg for msg in letta_messages if msg.message_type == "assistant_message") + user_message = next(msg for msg in letta_messages if msg.message_type == "user_message") + reasoning_message = next(msg for msg in letta_messages if msg.message_type == "reasoning_message") # user message update_user_message = UpdateUserMessage(content="Hello, Sarah!") @@ -849,7 +762,6 @@ async def test_create_many_messages_async_with_turbopuffer(server: SyncServer, s @pytest.mark.asyncio async def test_convert_tool_call_messages_no_assistant_mode(server: SyncServer, sarah_agent, default_user): 
"""Test that when assistant mode is off, all tool calls go into a single ToolCallMessage""" - from letta.schemas.letta_message import ToolCall # create a message with multiple tool calls tool_calls = [ diff --git a/tests/managers/test_organization_manager.py b/tests/managers/test_organization_manager.py index d21d7f9e..0178ee12 100644 --- a/tests/managers/test_organization_manager.py +++ b/tests/managers/test_organization_manager.py @@ -1,105 +1,11 @@ -import json -import logging -import os -import random -import re -import string -import time -import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch - import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest -from conftest import ( - CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, -) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError - -from letta.config import LettaConfig from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, ) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import 
MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock -from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, -) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem -from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from 
letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry +from letta.schemas.organization import Organization as PydanticOrganization, OrganizationUpdate from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # ====================================================================================================================== diff --git a/tests/managers/test_passage_manager.py b/tests/managers/test_passage_manager.py index e5020dea..d763995d 100644 --- a/tests/managers/test_passage_manager.py +++ b/tests/managers/test_passage_manager.py @@ -1,105 +1,22 @@ import json -import logging import os -import random -import re -import string -import time -import uuid from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch +from unittest.mock import Mock import pytest from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import 
BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest from conftest import ( - CREATE_DELAY_SQLITE, DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, ) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import AgentState, CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock -from letta.schemas.embedding_config import EmbeddingConfig +from letta.orm.errors import NoResultFound from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - 
ProviderType, - SandboxType, - StepStatus, TagMatchMode, - ToolType, - VectorDBProvider, ) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem -from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, 
validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # ====================================================================================================================== # Agent Manager - Passages Tests diff --git a/tests/managers/test_provider_manager.py b/tests/managers/test_provider_manager.py index fb135be8..94b9593c 100644 --- a/tests/managers/test_provider_manager.py +++ b/tests/managers/test_provider_manager.py @@ -1,12 +1,10 @@ """Tests for ProviderManager encryption/decryption logic.""" -import os - import pytest from letta.orm.provider import Provider as ProviderModel from letta.schemas.enums import ProviderCategory, ProviderType -from letta.schemas.providers import Provider, ProviderCreate, ProviderUpdate +from letta.schemas.providers import ProviderCreate, ProviderUpdate from letta.schemas.secret import Secret from letta.server.db import db_registry from letta.services.organization_manager import OrganizationManager @@ -501,11 +499,7 @@ async def test_server_startup_syncs_base_providers(default_user, default_organiz 3. Models are properly persisted to the database with correct metadata 4. 
Models can be retrieved using handles """ - from unittest.mock import AsyncMock - from letta.schemas.embedding_config import EmbeddingConfig - from letta.schemas.llm_config import LLMConfig - from letta.schemas.providers import AnthropicProvider, OpenAIProvider from letta.server.server import SyncServer # Mock OpenAI API responses @@ -563,16 +557,31 @@ async def test_server_startup_syncs_base_providers(default_user, default_organiz async def mock_openai_get_model_list_async(*args, **kwargs): return mock_openai_models - # Mock Anthropic models.list() response - from unittest.mock import MagicMock + # Mock Anthropic models.list() response as an async iterable + # (the real SDK returns an AsyncPage that supports async iteration) - mock_anthropic_response = MagicMock() - mock_anthropic_response.model_dump.return_value = mock_anthropic_models + class MockAnthropicModelItem: + def __init__(self, data): + self._data = data + + def model_dump(self): + return self._data + + class MockAnthropicAsyncPage: + def __init__(self, items): + self._items = [MockAnthropicModelItem(item) for item in items] + + def __aiter__(self): + return self._async_iter() + + async def _async_iter(self): + for item in self._items: + yield item # Mock the Anthropic AsyncAnthropic client class MockAnthropicModels: async def list(self): - return mock_anthropic_response + return MockAnthropicAsyncPage(mock_anthropic_models["data"]) class MockAsyncAnthropic: def __init__(self, *args, **kwargs): @@ -745,7 +754,7 @@ async def test_server_startup_handles_disabled_providers(default_user, default_o 2. BYOK providers that are no longer enabled are NOT deleted (user-created) 3. 
The sync process handles providers gracefully when API calls fail """ - from letta.schemas.providers import OpenAIProvider, ProviderCreate + from letta.schemas.providers import ProviderCreate from letta.server.server import SyncServer # First, manually create providers in the database @@ -833,31 +842,44 @@ async def test_server_startup_handles_api_errors_gracefully(default_user, defaul 2. Other providers can still sync successfully 3. The server startup completes without crashing """ - from letta.schemas.providers import AnthropicProvider, OpenAIProvider from letta.server.server import SyncServer # Mock OpenAI to fail async def mock_openai_fail(*args, **kwargs): raise Exception("OpenAI API is down") - # Mock Anthropic to succeed - from unittest.mock import MagicMock + # Mock Anthropic to succeed (as async iterable, matching real SDK pagination) - mock_anthropic_response = MagicMock() - mock_anthropic_response.model_dump.return_value = { - "data": [ - { - "id": "claude-3-5-sonnet-20241022", - "type": "model", - "display_name": "Claude 3.5 Sonnet", - "created_at": "2024-10-22T00:00:00Z", - } - ] - } + mock_anthropic_data = [ + { + "id": "claude-3-5-sonnet-20241022", + "type": "model", + "display_name": "Claude 3.5 Sonnet", + "created_at": "2024-10-22T00:00:00Z", + } + ] + + class MockAnthropicModelItem: + def __init__(self, data): + self._data = data + + def model_dump(self): + return self._data + + class MockAnthropicAsyncPage: + def __init__(self, items): + self._items = [MockAnthropicModelItem(item) for item in items] + + def __aiter__(self): + return self._async_iter() + + async def _async_iter(self): + for item in self._items: + yield item class MockAnthropicModels: async def list(self): - return mock_anthropic_response + return MockAnthropicAsyncPage(mock_anthropic_data) class MockAsyncAnthropic: def __init__(self, *args, **kwargs): @@ -915,7 +937,7 @@ async def test_server_startup_handles_api_errors_gracefully(default_user, defaul actor=default_user, ) if 
len(openai_providers) > 0: - openai_models = await server.provider_manager.list_models_async( + await server.provider_manager.list_models_async( actor=default_user, provider_id=openai_providers[0].id, ) diff --git a/tests/managers/test_run_manager.py b/tests/managers/test_run_manager.py index bde73cfb..1167c19d 100644 --- a/tests/managers/test_run_manager.py +++ b/tests/managers/test_run_manager.py @@ -1,105 +1,27 @@ -import json -import logging -import os -import random -import re -import string -import time import uuid from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch +from unittest.mock import AsyncMock, patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest -from conftest import ( - CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, -) from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError, LettaInvalidArgumentError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from 
letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock -from letta.schemas.embedding_config import EmbeddingConfig +from letta.errors import LettaInvalidArgumentError +from letta.orm.errors import NoResultFound from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, MessageRole, - ProviderType, RunStatus, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, ) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import Job as PydanticJob, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage +from letta.schemas.job import LettaRequestConfig from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message, Message as PydanticMessage, MessageCreate, MessageUpdate, ToolReturn +from letta.schemas.letta_stop_reason import StopReasonType +from letta.schemas.message import 
Message, Message as PydanticMessage, ToolReturn from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement from letta.schemas.run import Run as PydanticRun, RunUpdate -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry +from letta.schemas.user import User as PydanticUser from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # ====================================================================================================================== # RunManager Tests @@ -239,8 +161,7 @@ async def test_update_run_updates_agent_last_stop_reason(server: SyncServer, sar """Test that completing a run updates the agent's last_stop_reason.""" # Verify agent starts with no last_stop_reason - agent = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user) - initial_stop_reason = agent.last_stop_reason + await 
server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user) # Create a run run_data = PydanticRun(agent_id=sarah_agent.id) @@ -945,7 +866,7 @@ async def test_run_messages_ordering(server: SyncServer, default_run, default_us created_at=created_at, run_id=run.id, ) - msg = await server.message_manager.create_many_messages_async([message], actor=default_user) + await server.message_manager.create_many_messages_async([message], actor=default_user) # Verify messages are returned in chronological order returned_messages = await server.message_manager.list_messages( @@ -1093,7 +1014,7 @@ async def test_get_run_messages(server: SyncServer, default_user: PydanticUser, ) ) - created_msg = await server.message_manager.create_many_messages_async(messages, actor=default_user) + await server.message_manager.create_many_messages_async(messages, actor=default_user) # Get messages and verify they're converted correctly result = await server.message_manager.list_messages(run_id=run.id, actor=default_user) @@ -1166,7 +1087,7 @@ async def test_get_run_messages_with_assistant_message(server: SyncServer, defau ) ) - created_msg = await server.message_manager.create_many_messages_async(messages, actor=default_user) + await server.message_manager.create_many_messages_async(messages, actor=default_user) # Get messages and verify they're converted correctly result = await server.message_manager.list_messages(run_id=run.id, actor=default_user) @@ -1447,7 +1368,7 @@ async def test_run_metrics_duration_calculation(server: SyncServer, sarah_agent, await asyncio.sleep(0.1) # Wait 100ms # Update the run to completed - updated_run = await server.run_manager.update_run_by_id_async( + await server.run_manager.update_run_by_id_async( created_run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user ) @@ -1741,7 +1662,7 @@ def test_convert_statuses_to_enum_with_invalid_status(): async def 
test_list_runs_with_multiple_statuses(server: SyncServer, sarah_agent, default_user): """Test listing runs with multiple status filters.""" # Create runs with different statuses - run_created = await server.run_manager.create_run( + await server.run_manager.create_run( pydantic_run=PydanticRun( status=RunStatus.created, agent_id=sarah_agent.id, @@ -1749,7 +1670,7 @@ async def test_list_runs_with_multiple_statuses(server: SyncServer, sarah_agent, ), actor=default_user, ) - run_running = await server.run_manager.create_run( + await server.run_manager.create_run( pydantic_run=PydanticRun( status=RunStatus.running, agent_id=sarah_agent.id, @@ -1757,7 +1678,7 @@ async def test_list_runs_with_multiple_statuses(server: SyncServer, sarah_agent, ), actor=default_user, ) - run_completed = await server.run_manager.create_run( + await server.run_manager.create_run( pydantic_run=PydanticRun( status=RunStatus.completed, agent_id=sarah_agent.id, @@ -1765,7 +1686,7 @@ async def test_list_runs_with_multiple_statuses(server: SyncServer, sarah_agent, ), actor=default_user, ) - run_failed = await server.run_manager.create_run( + await server.run_manager.create_run( pydantic_run=PydanticRun( status=RunStatus.failed, agent_id=sarah_agent.id, diff --git a/tests/managers/test_sandbox_manager.py b/tests/managers/test_sandbox_manager.py index e52a7d78..f1f66af3 100644 --- a/tests/managers/test_sandbox_manager.py +++ b/tests/managers/test_sandbox_manager.py @@ -1,105 +1,23 @@ -import json -import logging -import os -import random -import re -import string import time -import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest from conftest import ( 
CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, USING_SQLITE, ) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError -from letta.config import LettaConfig from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, ) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock -from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, ) from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate 
-from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem -from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from 
tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string +from letta.settings import tool_settings # ====================================================================================================================== # SandboxConfigManager Tests - Sandbox Configs diff --git a/tests/managers/test_source_manager.py b/tests/managers/test_source_manager.py index b3a2418e..a7e032da 100644 --- a/tests/managers/test_source_manager.py +++ b/tests/managers/test_source_manager.py @@ -1,19 +1,8 @@ -import json -import logging -import os -import random -import re -import string import time import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch +from unittest.mock import patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest from conftest import ( @@ -21,85 +10,22 @@ from conftest import ( DEFAULT_EMBEDDING_CONFIG, USING_SQLITE, ) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError +from sqlalchemy.exc import InvalidRequestError -from letta.config import LettaConfig -from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, -) -from letta.data_sources.redis_client import NoopAsyncRedisClient, 
get_redis_client from letta.errors import LettaAgentNotFoundError, LettaInvalidArgumentError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory from letta.orm.errors import NoResultFound, UniqueConstraintViolationError from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock +from letta.schemas.agent import CreateAgent from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ( - ActorType, - AgentStepStatus, FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, VectorDBProvider, ) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as 
PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # Helper function for file content tests @@ -461,7 +387,7 @@ async def test_create_sources_with_same_name_raises_error(server: SyncServer, de metadata={"type": "medical"}, embedding_config=DEFAULT_EMBEDDING_CONFIG, ) - source = await server.source_manager.create_source(source=source_pydantic, actor=default_user) + await server.source_manager.create_source(source=source_pydantic, actor=default_user) # Attempting to create another source with the same name should raise an IntegrityError source_pydantic = PydanticSource( @@ -1194,7 +1120,7 @@ async def test_file_status_invalid_transitions(server, default_user, 
default_sou ) created = await server.file_manager.create_file(file_metadata=meta, actor=default_user) - with pytest.raises(ValueError, match="Invalid state transition.*pending.*COMPLETED"): + with pytest.raises(ValueError, match=r"Invalid state transition.*pending.*COMPLETED"): await server.file_manager.update_file_status( file_id=created.id, actor=default_user, @@ -1216,7 +1142,7 @@ async def test_file_status_invalid_transitions(server, default_user, default_sou processing_status=FileProcessingStatus.PARSING, ) - with pytest.raises(ValueError, match="Invalid state transition.*parsing.*COMPLETED"): + with pytest.raises(ValueError, match=r"Invalid state transition.*parsing.*COMPLETED"): await server.file_manager.update_file_status( file_id=created2.id, actor=default_user, @@ -1233,7 +1159,7 @@ async def test_file_status_invalid_transitions(server, default_user, default_sou ) created3 = await server.file_manager.create_file(file_metadata=meta3, actor=default_user) - with pytest.raises(ValueError, match="Invalid state transition.*pending.*EMBEDDING"): + with pytest.raises(ValueError, match=r"Invalid state transition.*pending.*EMBEDDING"): await server.file_manager.update_file_status( file_id=created3.id, actor=default_user, @@ -1260,14 +1186,14 @@ async def test_file_status_terminal_states(server, default_user, default_source) await server.file_manager.update_file_status(file_id=created.id, actor=default_user, processing_status=FileProcessingStatus.COMPLETED) # Cannot transition from COMPLETED to any state - with pytest.raises(ValueError, match="Cannot update.*terminal state completed"): + with pytest.raises(ValueError, match=r"Cannot update.*terminal state completed"): await server.file_manager.update_file_status( file_id=created.id, actor=default_user, processing_status=FileProcessingStatus.EMBEDDING, ) - with pytest.raises(ValueError, match="Cannot update.*terminal state completed"): + with pytest.raises(ValueError, match=r"Cannot update.*terminal state 
completed"): await server.file_manager.update_file_status( file_id=created.id, actor=default_user, @@ -1293,7 +1219,7 @@ async def test_file_status_terminal_states(server, default_user, default_source) ) # Cannot transition from ERROR to any state - with pytest.raises(ValueError, match="Cannot update.*terminal state error"): + with pytest.raises(ValueError, match=r"Cannot update.*terminal state error"): await server.file_manager.update_file_status( file_id=created2.id, actor=default_user, @@ -1387,7 +1313,7 @@ async def test_file_status_terminal_state_non_status_updates(server, default_use await server.file_manager.update_file_status(file_id=created.id, actor=default_user, processing_status=FileProcessingStatus.COMPLETED) # Cannot update chunks_embedded in COMPLETED state - with pytest.raises(ValueError, match="Cannot update.*terminal state completed"): + with pytest.raises(ValueError, match=r"Cannot update.*terminal state completed"): await server.file_manager.update_file_status( file_id=created.id, actor=default_user, @@ -1395,7 +1321,7 @@ async def test_file_status_terminal_state_non_status_updates(server, default_use ) # Cannot update total_chunks in COMPLETED state - with pytest.raises(ValueError, match="Cannot update.*terminal state completed"): + with pytest.raises(ValueError, match=r"Cannot update.*terminal state completed"): await server.file_manager.update_file_status( file_id=created.id, actor=default_user, @@ -1403,7 +1329,7 @@ async def test_file_status_terminal_state_non_status_updates(server, default_use ) # Cannot update error_message in COMPLETED state - with pytest.raises(ValueError, match="Cannot update.*terminal state completed"): + with pytest.raises(ValueError, match=r"Cannot update.*terminal state completed"): await server.file_manager.update_file_status( file_id=created.id, actor=default_user, @@ -1427,7 +1353,7 @@ async def test_file_status_terminal_state_non_status_updates(server, default_use ) # Cannot update chunks_embedded in ERROR 
state - with pytest.raises(ValueError, match="Cannot update.*terminal state error"): + with pytest.raises(ValueError, match=r"Cannot update.*terminal state error"): await server.file_manager.update_file_status( file_id=created2.id, actor=default_user, @@ -1473,7 +1399,7 @@ async def test_file_status_race_condition_prevention(server, default_user, defau # Try to continue with EMBEDDING as if error didn't happen (race condition) # This should fail because file is in ERROR state - with pytest.raises(ValueError, match="Cannot update.*terminal state error"): + with pytest.raises(ValueError, match=r"Cannot update.*terminal state error"): await server.file_manager.update_file_status( file_id=created.id, actor=default_user, @@ -1498,7 +1424,7 @@ async def test_file_status_backwards_transitions(server, default_user, default_s await server.file_manager.update_file_status(file_id=created.id, actor=default_user, processing_status=FileProcessingStatus.EMBEDDING) # Cannot go back to PARSING - with pytest.raises(ValueError, match="Invalid state transition.*embedding.*PARSING"): + with pytest.raises(ValueError, match=r"Invalid state transition.*embedding.*PARSING"): await server.file_manager.update_file_status( file_id=created.id, actor=default_user, @@ -1506,7 +1432,7 @@ async def test_file_status_backwards_transitions(server, default_user, default_s ) # Cannot go back to PENDING - with pytest.raises(ValueError, match="Cannot transition to PENDING state.*PENDING is only valid as initial state"): + with pytest.raises(ValueError, match=r"Cannot transition to PENDING state.*PENDING is only valid as initial state"): await server.file_manager.update_file_status( file_id=created.id, actor=default_user, diff --git a/tests/managers/test_tool_manager.py b/tests/managers/test_tool_manager.py index 4658acb8..4489d68e 100644 --- a/tests/managers/test_tool_manager.py +++ b/tests/managers/test_tool_manager.py @@ -1,32 +1,8 @@ -import json -import logging -import os -import random -import re 
-import string -import time import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch import pytest -from _pytest.python_api import approx -from anthropic.types.beta import BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest -from conftest import ( - CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, -) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError - -from letta.config import LettaConfig from letta.constants import ( BASE_MEMORY_TOOLS, BASE_SLEEPTIME_TOOLS, @@ -34,73 +10,27 @@ from letta.constants import ( BASE_VOICE_SLEEPTIME_CHAT_TOOLS, BASE_VOICE_SLEEPTIME_TOOLS, BUILTIN_TOOLS, - DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, LETTA_TOOL_SET, LOCAL_ONLY_MULTI_AGENT_TOOLS, MCP_TOOL_TAG_NAME_PREFIX, MULTI_AGENT_TOOLS, ) -from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver -from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory +from letta.functions.functions import parse_source_code from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel 
-from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock +from letta.schemas.agent import CreateAgent from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - ProviderType, - SandboxType, - StepStatus, - TagMatchMode, ToolType, - VectorDBProvider, ) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate 
-from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser, UserUpdate +from letta.schemas.tool import Tool as PydanticTool, ToolUpdate from letta.server.db import db_registry from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType from letta.services.tool_schema_generator import generate_schema_for_tool_creation -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string +from letta.settings import settings # ====================================================================================================================== # AgentManager Tests - Tools Relationship @@ -2015,8 +1945,8 @@ def test_function(): source_code=source_code, ) - with pytest.raises(ValueError) as exc_info: - created_tool = await tool_manager.create_or_update_tool_async(tool, default_user) + with pytest.raises(ValueError): + await tool_manager.create_or_update_tool_async(tool, default_user) async def test_error_on_create_tool_with_name_conflict(server: SyncServer, default_user, default_organization): diff --git a/tests/managers/test_user_manager.py b/tests/managers/test_user_manager.py index eac55f19..6c1e7f82 100644 --- a/tests/managers/test_user_manager.py +++ b/tests/managers/test_user_manager.py @@ -1,104 +1,14 @@ -import logging -import os -import random -import re -import string -import time -import uuid -from datetime import datetime, timedelta, timezone -from typing import List -from unittest.mock import AsyncMock, Mock, patch - import pytest -from _pytest.python_api import approx -from anthropic.types.beta import 
BetaMessage -from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult # Import shared fixtures and constants from conftest -from conftest import ( - CREATE_DELAY_SQLITE, - DEFAULT_EMBEDDING_CONFIG, - USING_SQLITE, -) -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction -from sqlalchemy import func, select -from sqlalchemy.exc import IntegrityError, InvalidRequestError -from sqlalchemy.orm.exc import StaleDataError - -from letta.config import LettaConfig from letta.constants import ( - BASE_MEMORY_TOOLS, - BASE_SLEEPTIME_TOOLS, - BASE_TOOLS, - BASE_VOICE_SLEEPTIME_CHAT_TOOLS, - BASE_VOICE_SLEEPTIME_TOOLS, - BUILTIN_TOOLS, DEFAULT_ORG_ID, - DEFAULT_ORG_NAME, - FILES_TOOLS, - LETTA_TOOL_EXECUTION_DIR, - LETTA_TOOL_SET, - LOCAL_ONLY_MULTI_AGENT_TOOLS, - MCP_TOOL_TAG_NAME_PREFIX, - MULTI_AGENT_TOOLS, ) from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client -from letta.errors import LettaAgentNotFoundError -from letta.functions.functions import derive_openai_json_schema, parse_source_code -from letta.functions.mcp_client.types import MCPTool -from letta.helpers import ToolRulesSolver from letta.helpers.datetime_helpers import AsyncTimer -from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo -from letta.orm import Base, Block -from letta.orm.block_history import BlockHistory -from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel -from letta.schemas.agent import CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock -from letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.enums import ( - ActorType, - AgentStepStatus, - FileProcessingStatus, - JobStatus, - JobType, - MessageRole, - 
ProviderType, - SandboxType, - StepStatus, - TagMatchMode, - ToolType, - VectorDBProvider, -) -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig -from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage -from letta.schemas.letta_message_content import TextContent -from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType -from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem -from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate -from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate -from letta.schemas.passage import Passage as PydanticPassage -from letta.schemas.pip_requirement import PipRequirement -from letta.schemas.run import Run as PydanticRun -from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource, SourceUpdate -from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate -from letta.schemas.tool_rule import InitToolRule +from letta.schemas.organization import Organization as PydanticOrganization from letta.schemas.user import User as PydanticUser, UserUpdate -from letta.server.db import db_registry from letta.server.server import SyncServer -from letta.services.block_manager import BlockManager -from 
letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async -from letta.services.step_manager import FeedbackType -from letta.settings import settings, tool_settings -from letta.utils import calculate_file_defaults_based_on_context_window -from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview -from tests.utils import random_string # ====================================================================================================================== diff --git a/tests/manual_test_many_messages.py b/tests/manual_test_many_messages.py index 795515ad..3594f923 100644 --- a/tests/manual_test_many_messages.py +++ b/tests/manual_test_many_messages.py @@ -17,7 +17,7 @@ from letta.server.server import SyncServer @pytest.fixture(autouse=True) def truncate_database(): - from letta.server.db import db_context + from letta.server.db import db_context # type: ignore[attr-defined] with db_context() as session: for table in reversed(Base.metadata.sorted_tables): # Reverse to avoid FK issues @@ -94,7 +94,7 @@ def create_send_message(agent_id, organization_id, assistant_text, tool_call_id, "content": [{"type": "text", "text": f"Assistant reply generated at {timestamp.strftime('%Y-%m-%d %I:%M:%S %p PST-0800')}."}], "organization_id": organization_id, "agent_id": agent_id, - "model": "claude-3-5-haiku-20241022", + "model": "claude-haiku-4-5-20251001", "name": None, "tool_calls": [ { @@ -126,7 +126,7 @@ def create_tool_message(agent_id, organization_id, tool_call_id, timestamp): ], "organization_id": organization_id, "agent_id": agent_id, - "model": "claude-3-5-haiku-20241022", + "model": "claude-haiku-4-5-20251001", "name": "send_message", "tool_calls": None, "tool_call_id": tool_call_id, diff --git a/tests/manual_test_multi_agent_broadcast_large.py b/tests/manual_test_multi_agent_broadcast_large.py index 7f992910..8a0f315b 100644 --- 
a/tests/manual_test_multi_agent_broadcast_large.py +++ b/tests/manual_test_multi_agent_broadcast_large.py @@ -69,12 +69,9 @@ def test_multi_agent_large(server, default_user, roll_dice_tool, num_workers): actor=default_user, ) - manager_agent = server.load_agent(agent_id=manager_agent_state.id, actor=default_user) - # Create N worker agents - worker_agents = [] for idx in tqdm(range(num_workers)): - worker_agent_state = server.create_agent( + server.create_agent( CreateAgent( name=f"worker-{idx}", tool_ids=[roll_dice_tool.id], @@ -86,13 +83,11 @@ def test_multi_agent_large(server, default_user, roll_dice_tool, num_workers): ), actor=default_user, ) - worker_agent = server.load_agent(agent_id=worker_agent_state.id, actor=default_user) - worker_agents.append(worker_agent) # Manager sends broadcast message broadcast_message = f"Send a message to all agents with tags {worker_tags} asking them to roll a dice for you!" server.send_messages( actor=default_user, - agent_id=manager_agent.agent_state.id, + agent_id=manager_agent_state.id, input_messages=[MessageCreate(role="user", content=broadcast_message)], ) diff --git a/tests/mcp_tests/test_mcp.py b/tests/mcp_tests/test_mcp.py index 9930a0cd..618f7269 100644 --- a/tests/mcp_tests/test_mcp.py +++ b/tests/mcp_tests/test_mcp.py @@ -151,6 +151,7 @@ def agent_state(client): client.agents.delete(agent_state.id) +@pytest.mark.skip(reason="The deepwiki SSE MCP server is deprecated") @pytest.mark.asyncio async def test_sse_mcp_server(client, agent_state): mcp_server_name = "deepwiki" diff --git a/tests/mcp_tests/test_mcp_schema_validation.py b/tests/mcp_tests/test_mcp_schema_validation.py index 93eb021b..b049d33c 100644 --- a/tests/mcp_tests/test_mcp_schema_validation.py +++ b/tests/mcp_tests/test_mcp_schema_validation.py @@ -138,7 +138,6 @@ async def test_add_mcp_tool_accepts_non_strict_schemas(): @pytest.mark.asyncio async def test_add_mcp_tool_rejects_invalid_schemas(): """Test that adding MCP tools with invalid schemas is 
rejected.""" - from fastapi import HTTPException from letta.server.rest_api.routers.v1.tools import add_mcp_tool from letta.settings import tool_settings @@ -465,7 +464,6 @@ def test_mcp_schema_with_uuid_format_required_field(): def test_mcp_schema_complex_nested_with_defs(): """Test generating exact schema with nested Pydantic-like models using $defs.""" - import json from letta.functions.mcp_client.types import MCPToolHealth diff --git a/tests/mcp_tests/test_schema_validator.py b/tests/mcp_tests/test_schema_validator.py index c9dd66b3..41eba96e 100644 --- a/tests/mcp_tests/test_schema_validator.py +++ b/tests/mcp_tests/test_schema_validator.py @@ -186,7 +186,7 @@ class TestSchemaValidator: } # This should actually be STRICT_COMPLIANT since empty arrays with defined items are OK - status, reasons = validate_complete_json_schema(schema) + status, _reasons = validate_complete_json_schema(schema) assert status == SchemaHealth.STRICT_COMPLIANT def test_array_without_constraints_invalid(self): @@ -229,7 +229,6 @@ class TestSchemaValidator: assert status == SchemaHealth.STRICT_COMPLIANT assert reasons == [] - def test_root_level_without_required_non_strict(self): """Test that root-level objects without 'required' field are STRICT_COMPLIANT (validator is relaxed).""" schema = { diff --git a/tests/mock_mcp_server.py b/tests/mock_mcp_server.py index b3381720..a98bfa27 100755 --- a/tests/mock_mcp_server.py +++ b/tests/mock_mcp_server.py @@ -5,10 +5,10 @@ Simple MCP test server with basic and complex tools for testing purposes. 
import json import logging -from typing import List, Optional, Union +from typing import List, Optional from mcp.server.fastmcp import FastMCP -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, Field # Configure logging to stderr (not stdout for STDIO servers) logging.basicConfig(level=logging.INFO) diff --git a/tests/model_settings/lmstudio.json b/tests/model_settings/lmstudio.json new file mode 100644 index 00000000..1ca09b2d --- /dev/null +++ b/tests/model_settings/lmstudio.json @@ -0,0 +1,9 @@ +{ + "handle": "lmstudio_openai/qwen3-4b", + "model_settings": { + "provider_type": "openai", + "temperature": 0.7, + "max_output_tokens": 4096, + "parallel_tool_calls": true + } +} diff --git a/tests/model_settings/ollama.json b/tests/model_settings/ollama.json index 9382a68c..bd905dac 100644 --- a/tests/model_settings/ollama.json +++ b/tests/model_settings/ollama.json @@ -1,9 +1,9 @@ { - "handle": "ollama/qwen2.5:7b", + "handle": "ollama/qwen3:8b", "model_settings": { "provider_type": "openai", - "temperature": 1.0, + "temperature": 0.7, "max_output_tokens": 4096, - "parallel_tool_calls": false + "parallel_tool_calls": true } } diff --git a/tests/model_settings/vllm.json b/tests/model_settings/vllm.json new file mode 100644 index 00000000..0ee9492b --- /dev/null +++ b/tests/model_settings/vllm.json @@ -0,0 +1,9 @@ +{ + "handle": "vllm/Qwen/Qwen3-32B-AWQ", + "model_settings": { + "provider_type": "openai", + "temperature": 0.7, + "max_output_tokens": 4096, + "parallel_tool_calls": true + } +} diff --git a/tests/model_settings/zai-glm-4.6.json b/tests/model_settings/zai-glm-4.6.json index 00ca14c6..e0a2e2f2 100644 --- a/tests/model_settings/zai-glm-4.6.json +++ b/tests/model_settings/zai-glm-4.6.json @@ -4,6 +4,10 @@ "provider_type": "zai", "temperature": 1.0, "max_output_tokens": 4096, - "parallel_tool_calls": false + "parallel_tool_calls": false, + "thinking": { + "type": "enabled", + "clear_thinking": false + } } } diff --git 
a/tests/model_settings/zai-glm-5.json b/tests/model_settings/zai-glm-5.json new file mode 100644 index 00000000..7c94aecd --- /dev/null +++ b/tests/model_settings/zai-glm-5.json @@ -0,0 +1,13 @@ +{ + "handle": "zai/glm-5", + "model_settings": { + "provider_type": "zai", + "temperature": 1.0, + "max_output_tokens": 4096, + "parallel_tool_calls": false, + "thinking": { + "type": "enabled", + "clear_thinking": false + } + } +} diff --git a/tests/performance_tests/test_agent_mass_creation.py b/tests/performance_tests/test_agent_mass_creation.py index f9dc57db..7888899f 100644 --- a/tests/performance_tests/test_agent_mass_creation.py +++ b/tests/performance_tests/test_agent_mass_creation.py @@ -73,7 +73,7 @@ def roll_dice_tool(client): @pytest.fixture() def rethink_tool(client): - def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_label: str) -> str: # type: ignore + def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_label: str) -> str: # type: ignore # noqa: F821 """ Re-evaluate the memory in block_name, integrating new and updated facts. Replace outdated information with the most likely truths, avoiding redundancy with original memories. diff --git a/tests/performance_tests/test_agent_mass_update.py b/tests/performance_tests/test_agent_mass_update.py index 841462ef..0c84cdd8 100644 --- a/tests/performance_tests/test_agent_mass_update.py +++ b/tests/performance_tests/test_agent_mass_update.py @@ -72,7 +72,7 @@ def roll_dice_tool(client): @pytest.fixture() def rethink_tool(client): - def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_label: str) -> str: # type: ignore + def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_label: str) -> str: # type: ignore # noqa: F821 """ Re-evaluate the memory in block_name, integrating new and updated facts. Replace outdated information with the most likely truths, avoiding redundancy with original memories. 
diff --git a/tests/performance_tests/test_insert_archival_memory.py b/tests/performance_tests/test_insert_archival_memory.py index 93deedce..04ac2406 100644 --- a/tests/performance_tests/test_insert_archival_memory.py +++ b/tests/performance_tests/test_insert_archival_memory.py @@ -111,7 +111,7 @@ async def test_insert_archival_memories_concurrent(client): cdf_y = np.arange(1, len(durs_sorted) + 1) / len(durs_sorted) # Plot all 6 subplots - fig, axes = plt.subplots(2, 3, figsize=(15, 8)) + _fig, axes = plt.subplots(2, 3, figsize=(15, 8)) axs = axes.ravel() # 1) Kickoff timeline diff --git a/tests/sdk/agents_test.py b/tests/sdk/agents_test.py index 9937f6f4..74079c0d 100644 --- a/tests/sdk/agents_test.py +++ b/tests/sdk/agents_test.py @@ -11,7 +11,7 @@ AGENTS_CREATE_PARAMS = [ # (OpenAIModelSettings defaults to True only when explicitly instantiated) "model_settings": { "max_output_tokens": 16384, - "parallel_tool_calls": False, + "parallel_tool_calls": True, "strict": False, "provider_type": "openai", "temperature": 1.0, @@ -31,7 +31,7 @@ AGENTS_UPDATE_PARAMS = [ # After updating just the name, model_settings should still be present "model_settings": { "max_output_tokens": 16384, - "parallel_tool_calls": False, + "parallel_tool_calls": True, "strict": False, "provider_type": "openai", "temperature": 1.0, diff --git a/tests/sdk/mcp_servers_test.py b/tests/sdk/mcp_servers_test.py index 4fc0a816..94efdec1 100644 --- a/tests/sdk/mcp_servers_test.py +++ b/tests/sdk/mcp_servers_test.py @@ -187,7 +187,7 @@ def get_attr(obj, attr): return getattr(obj, attr, None) -def create_stdio_server_request(server_name: str, command: str = "npx", args: List[str] = None) -> Dict[str, Any]: +def create_stdio_server_request(server_name: str, command: str = "npx", args: List[str] | None = None) -> Dict[str, Any]: """Create a stdio MCP server configuration object. Returns a dict with server_name and config following CreateMCPServerRequest schema. 
@@ -203,7 +203,7 @@ def create_stdio_server_request(server_name: str, command: str = "npx", args: Li } -def create_sse_server_request(server_name: str, server_url: str = None) -> Dict[str, Any]: +def create_sse_server_request(server_name: str, server_url: str | None = None) -> Dict[str, Any]: """Create an SSE MCP server configuration object. Returns a dict with server_name and config following CreateMCPServerRequest schema. @@ -220,7 +220,7 @@ def create_sse_server_request(server_name: str, server_url: str = None) -> Dict[ } -def create_streamable_http_server_request(server_name: str, server_url: str = None) -> Dict[str, Any]: +def create_streamable_http_server_request(server_name: str, server_url: str | None = None) -> Dict[str, Any]: """Create a streamable HTTP MCP server configuration object. Returns a dict with server_name and config following CreateMCPServerRequest schema. @@ -508,7 +508,7 @@ def test_invalid_server_type(client: Letta): client.mcp_servers.create(**invalid_config) # If we get here without an exception, the test should fail assert False, "Expected an error when creating server with missing required fields" - except (BadRequestError, UnprocessableEntityError, TypeError, ValueError) as e: + except (BadRequestError, UnprocessableEntityError, TypeError, ValueError): # Expected to fail - this is good test_passed = True diff --git a/tests/sdk/search_test.py b/tests/sdk/search_test.py index aec2e169..ac9946ba 100644 --- a/tests/sdk/search_test.py +++ b/tests/sdk/search_test.py @@ -220,7 +220,7 @@ def test_passage_search_basic(client: Letta, enable_turbopuffer): # Clean up archive try: client.archives.delete(archive_id=archive.id) - except: + except Exception: pass finally: @@ -282,7 +282,7 @@ def test_passage_search_with_tags(client: Letta, enable_turbopuffer): # Clean up archive try: client.archives.delete(archive_id=archive.id) - except: + except Exception: pass finally: @@ -350,7 +350,7 @@ def test_passage_search_with_date_filters(client: Letta, 
enable_turbopuffer): # Clean up archive try: client.archives.delete(archive_id=archive.id) - except: + except Exception: pass finally: @@ -489,7 +489,7 @@ def test_passage_search_pagination(client: Letta, enable_turbopuffer): # Clean up archive try: client.archives.delete(archive_id=archive.id) - except: + except Exception: pass finally: @@ -554,11 +554,11 @@ def test_passage_search_org_wide(client: Letta, enable_turbopuffer): # Clean up archives try: client.archives.delete(archive_id=archive1.id) - except: + except Exception: pass try: client.archives.delete(archive_id=archive2.id) - except: + except Exception: pass finally: diff --git a/tests/test_agent_files/deep-thought.af b/tests/test_agent_files/deep-thought.af index 43b683f2..6a7e932c 100644 --- a/tests/test_agent_files/deep-thought.af +++ b/tests/test_agent_files/deep-thought.af @@ -276,19 +276,19 @@ "type": "string", "description": "Section of the memory to be edited, identified by its label." }, - "old_str": { + "old_string": { "type": "string", "description": "The text to replace (must match exactly, including whitespace and indentation)." }, - "new_str": { + "new_string": { "type": "string", "description": "The new text to insert in place of the old text. Do not include line number prefixes." } }, "required": [ "label", - "old_str", - "new_str" + "old_string", + "new_string" ] }, "type": null, @@ -319,7 +319,7 @@ "type": "string", "description": "Section of the memory to be edited, identified by its label." }, - "new_str": { + "new_string": { "type": "string", "description": "The text to insert. Do not include line number prefixes." 
}, @@ -330,7 +330,7 @@ }, "required": [ "label", - "new_str" + "new_string" ] }, "type": null, diff --git a/tests/test_agent_files/max_messages.af b/tests/test_agent_files/max_messages.af index 346fed99..87b13fb3 100644 --- a/tests/test_agent_files/max_messages.af +++ b/tests/test_agent_files/max_messages.af @@ -799,7 +799,7 @@ "type": "string", "description": "Section of the memory to be edited, identified by its label." }, - "new_str": { + "new_string": { "type": "string", "description": "The text to insert." }, @@ -810,7 +810,7 @@ }, "required": [ "label", - "new_str" + "new_string" ] }, "type": null, @@ -911,19 +911,19 @@ "type": "string", "description": "Section of the memory to be edited, identified by its label." }, - "old_str": { + "old_string": { "type": "string", "description": "The text to replace (must match exactly, including whitespace and indentation)." }, - "new_str": { + "new_string": { "type": "string", "description": "The new text to insert in place of the old text. Do not include line number prefixes." 
} }, "required": [ "label", - "old_str", - "new_str" + "old_string", + "new_string" ] }, "type": null, diff --git a/tests/test_agent_files/test_agent.af b/tests/test_agent_files/test_agent.af index 452ce1bc..91f5116c 100644 --- a/tests/test_agent_files/test_agent.af +++ b/tests/test_agent_files/test_agent.af @@ -1678,7 +1678,7 @@ { "id": "tool-5", "tool_type": "letta_memory_core", - "description": "Memory management tool with various sub-commands for memory block operations.\n\nExamples:\n # Replace text in a memory block\n memory(agent_state, \"str_replace\", path=\"/memories/user_preferences\", old_str=\"theme: dark\", new_str=\"theme: light\")\n\n # Insert text at line 5\n memory(agent_state, \"insert\", path=\"/memories/notes\", insert_line=5, insert_text=\"New note here\")\n\n # Delete a memory block\n memory(agent_state, \"delete\", path=\"/memories/old_notes\")\n\n # Rename a memory block\n memory(agent_state, \"rename\", old_path=\"/memories/temp\", new_path=\"/memories/permanent\")\n\n # Update the description of a memory block\n memory(agent_state, \"rename\", path=\"/memories/temp\", description=\"The user's temporary notes.\")\n\n # Create a memory block with starting text\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\", \"file_text\": \"The user seems to add type hints to all of their Python code.\")\n\n # Create an empty memory block\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\")", + "description": "Memory management tool with various sub-commands for memory block operations.\n\nExamples:\n # Replace text in a memory block\n memory(agent_state, \"str_replace\", path=\"/memories/user_preferences\", old_string=\"theme: dark\", new_string=\"theme: light\")\n\n # Insert text at line 5\n memory(agent_state, \"insert\", path=\"/memories/notes\", insert_line=5, insert_text=\"New note here\")\n\n # Delete 
a memory block\n memory(agent_state, \"delete\", path=\"/memories/old_notes\")\n\n # Rename a memory block\n memory(agent_state, \"rename\", old_path=\"/memories/temp\", new_path=\"/memories/permanent\")\n\n # Update the description of a memory block\n memory(agent_state, \"rename\", path=\"/memories/temp\", description=\"The user's temporary notes.\")\n\n # Create a memory block with starting text\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\", \"file_text\": \"The user seems to add type hints to all of their Python code.\")\n\n # Create an empty memory block\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\")", "source_type": "python", "name": "memory", "tags": [ @@ -1687,7 +1687,7 @@ "source_code": null, "json_schema": { "name": "memory", - "description": "Memory management tool with various sub-commands for memory block operations.\n\nExamples:\n # Replace text in a memory block\n memory(agent_state, \"str_replace\", path=\"/memories/user_preferences\", old_str=\"theme: dark\", new_str=\"theme: light\")\n\n # Insert text at line 5\n memory(agent_state, \"insert\", path=\"/memories/notes\", insert_line=5, insert_text=\"New note here\")\n\n # Delete a memory block\n memory(agent_state, \"delete\", path=\"/memories/old_notes\")\n\n # Rename a memory block\n memory(agent_state, \"rename\", old_path=\"/memories/temp\", new_path=\"/memories/permanent\")\n\n # Update the description of a memory block\n memory(agent_state, \"rename\", path=\"/memories/temp\", description=\"The user's temporary notes.\")\n\n # Create a memory block with starting text\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\", \"file_text\": \"The user seems to add type hints to all of their Python code.\")\n\n # Create an empty memory block\n memory(agent_state, 
\"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\")", + "description": "Memory management tool with various sub-commands for memory block operations.\n\nExamples:\n # Replace text in a memory block\n memory(agent_state, \"str_replace\", path=\"/memories/user_preferences\", old_string=\"theme: dark\", new_string=\"theme: light\")\n\n # Insert text at line 5\n memory(agent_state, \"insert\", path=\"/memories/notes\", insert_line=5, insert_text=\"New note here\")\n\n # Delete a memory block\n memory(agent_state, \"delete\", path=\"/memories/old_notes\")\n\n # Rename a memory block\n memory(agent_state, \"rename\", old_path=\"/memories/temp\", new_path=\"/memories/permanent\")\n\n # Update the description of a memory block\n memory(agent_state, \"rename\", path=\"/memories/temp\", description=\"The user's temporary notes.\")\n\n # Create a memory block with starting text\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\", \"file_text\": \"The user seems to add type hints to all of their Python code.\")\n\n # Create an empty memory block\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\")", "parameters": { "type": "object", "properties": { @@ -1707,11 +1707,11 @@ "type": "string", "description": "The description to set in the memory block (for create, rename)" }, - "old_str": { + "old_string": { "type": "string", "description": "Old text to replace (for str_replace)" }, - "new_str": { + "new_string": { "type": "string", "description": "New text to replace with (for str_replace)" }, diff --git a/tests/test_agent_files/test_agent_with_files_and_sources.af b/tests/test_agent_files/test_agent_with_files_and_sources.af index f2b75b2f..81b8cbe9 100644 --- a/tests/test_agent_files/test_agent_with_files_and_sources.af +++ 
b/tests/test_agent_files/test_agent_with_files_and_sources.af @@ -514,7 +514,7 @@ "type": "string", "description": "Section of the memory to be edited, identified by its label." }, - "new_str": { + "new_string": { "type": "string", "description": "The text to insert. Do not include line number prefixes." }, @@ -525,7 +525,7 @@ }, "required": [ "label", - "new_str" + "new_string" ] } }, @@ -557,19 +557,19 @@ "type": "string", "description": "Section of the memory to be edited, identified by its label." }, - "old_str": { + "old_string": { "type": "string", "description": "The text to replace (must match exactly, including whitespace and indentation)." }, - "new_str": { + "new_string": { "type": "string", "description": "The new text to insert in place of the old text. Do not include line number prefixes." } }, "required": [ "label", - "old_str", - "new_str" + "old_string", + "new_string" ] } }, diff --git a/tests/test_agent_files/test_basic_agent_with_blocks_tools_messages_v2.af b/tests/test_agent_files/test_basic_agent_with_blocks_tools_messages_v2.af index 43103213..a92096e1 100644 --- a/tests/test_agent_files/test_basic_agent_with_blocks_tools_messages_v2.af +++ b/tests/test_agent_files/test_basic_agent_with_blocks_tools_messages_v2.af @@ -574,7 +574,7 @@ { "id": "tool-6", "tool_type": "letta_sleeptime_core", - "description": "The memory_insert command allows you to insert text at a specific location in a memory block.\n\nExamples:\n # Update a block containing information about the user (append to the end of the block)\n memory_insert(label=\"customer\", new_str=\"The customer's ticket number is 12345\")\n\n # Update a block containing information about the user (insert at the beginning of the block)\n memory_insert(label=\"customer\", new_str=\"The customer's ticket number is 12345\", insert_line=0)\n\n Returns:\n Optional[str]: None is always returned as this function does not produce a response.", + "description": "The memory_insert command allows you to 
insert text at a specific location in a memory block.\n\nExamples:\n # Update a block containing information about the user (append to the end of the block)\n memory_insert(label=\"customer\", new_string=\"The customer's ticket number is 12345\")\n\n # Update a block containing information about the user (insert at the beginning of the block)\n memory_insert(label=\"customer\", new_string=\"The customer's ticket number is 12345\", insert_line=0)\n\n Returns:\n Optional[str]: None is always returned as this function does not produce a response.", "source_type": "python", "name": "memory_insert", "tags": [ @@ -583,7 +583,7 @@ "source_code": null, "json_schema": { "name": "memory_insert", - "description": "The memory_insert command allows you to insert text at a specific location in a memory block.\n\nExamples:\n # Update a block containing information about the user (append to the end of the block)\n memory_insert(label=\"customer\", new_str=\"The customer's ticket number is 12345\")\n\n # Update a block containing information about the user (insert at the beginning of the block)\n memory_insert(label=\"customer\", new_str=\"The customer's ticket number is 12345\", insert_line=0)\n\n Returns:\n Optional[str]: None is always returned as this function does not produce a response.", + "description": "The memory_insert command allows you to insert text at a specific location in a memory block.\n\nExamples:\n # Update a block containing information about the user (append to the end of the block)\n memory_insert(label=\"customer\", new_string=\"The customer's ticket number is 12345\")\n\n # Update a block containing information about the user (insert at the beginning of the block)\n memory_insert(label=\"customer\", new_string=\"The customer's ticket number is 12345\", insert_line=0)\n\n Returns:\n Optional[str]: None is always returned as this function does not produce a response.", "parameters": { "type": "object", "properties": { @@ -591,7 +591,7 @@ "type": "string", 
"description": "Section of the memory to be edited, identified by its label." }, - "new_str": { + "new_string": { "type": "string", "description": "The text to insert. Do not include line number prefixes." }, @@ -602,7 +602,7 @@ }, "required": [ "label", - "new_str" + "new_string" ] } }, @@ -618,7 +618,7 @@ { "id": "tool-1", "tool_type": "letta_sleeptime_core", - "description": "The memory_replace command allows you to replace a specific string in a memory block with a new string. This is used for making precise edits.\n\nExamples:\n # Update a block containing information about the user\n memory_replace(label=\"human\", old_str=\"Their name is Alice\", new_str=\"Their name is Bob\")\n\n # Update a block containing a todo list\n memory_replace(label=\"todos\", old_str=\"- [ ] Step 5: Search the web\", new_str=\"- [x] Step 5: Search the web\")\n\n # Pass an empty string to\n memory_replace(label=\"human\", old_str=\"Their name is Alice\", new_str=\"\")\n\n # Bad example - do NOT add (view-only) line numbers to the args\n memory_replace(label=\"human\", old_str=\"Line 1: Their name is Alice\", new_str=\"Line 1: Their name is Bob\")\n\n # Bad example - do NOT include the number number warning either\n memory_replace(label=\"human\", old_str=\"# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\\nLine 1: Their name is Alice\", new_str=\"Line 1: Their name is Bob\")\n\n # Good example - no line numbers or line number warning (they are view-only), just the text\n memory_replace(label=\"human\", old_str=\"Their name is Alice\", new_str=\"Their name is Bob\")\n\n Returns:\n str: The success message", + "description": "The memory_replace command allows you to replace a specific string in a memory block with a new string. 
This is used for making precise edits.\n\nExamples:\n # Update a block containing information about the user\n memory_replace(label=\"human\", old_string=\"Their name is Alice\", new_string=\"Their name is Bob\")\n\n # Update a block containing a todo list\n memory_replace(label=\"todos\", old_string=\"- [ ] Step 5: Search the web\", new_string=\"- [x] Step 5: Search the web\")\n\n # Pass an empty string to\n memory_replace(label=\"human\", old_string=\"Their name is Alice\", new_string=\"\")\n\n # Bad example - do NOT add (view-only) line numbers to the args\n memory_replace(label=\"human\", old_string=\"Line 1: Their name is Alice\", new_string=\"Line 1: Their name is Bob\")\n\n # Bad example - do NOT include the number number warning either\n memory_replace(label=\"human\", old_string=\"# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\\nLine 1: Their name is Alice\", new_string=\"Line 1: Their name is Bob\")\n\n # Good example - no line numbers or line number warning (they are view-only), just the text\n memory_replace(label=\"human\", old_string=\"Their name is Alice\", new_string=\"Their name is Bob\")\n\n Returns:\n str: The success message", "source_type": "python", "name": "memory_replace", "tags": [ @@ -627,7 +627,7 @@ "source_code": null, "json_schema": { "name": "memory_replace", - "description": "The memory_replace command allows you to replace a specific string in a memory block with a new string. 
This is used for making precise edits.\n\nExamples:\n # Update a block containing information about the user\n memory_replace(label=\"human\", old_str=\"Their name is Alice\", new_str=\"Their name is Bob\")\n\n # Update a block containing a todo list\n memory_replace(label=\"todos\", old_str=\"- [ ] Step 5: Search the web\", new_str=\"- [x] Step 5: Search the web\")\n\n # Pass an empty string to\n memory_replace(label=\"human\", old_str=\"Their name is Alice\", new_str=\"\")\n\n # Bad example - do NOT add (view-only) line numbers to the args\n memory_replace(label=\"human\", old_str=\"Line 1: Their name is Alice\", new_str=\"Line 1: Their name is Bob\")\n\n # Bad example - do NOT include the number number warning either\n memory_replace(label=\"human\", old_str=\"# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\\nLine 1: Their name is Alice\", new_str=\"Line 1: Their name is Bob\")\n\n # Good example - no line numbers or line number warning (they are view-only), just the text\n memory_replace(label=\"human\", old_str=\"Their name is Alice\", new_str=\"Their name is Bob\")\n\n Returns:\n str: The success message", + "description": "The memory_replace command allows you to replace a specific string in a memory block with a new string. 
This is used for making precise edits.\n\nExamples:\n # Update a block containing information about the user\n memory_replace(label=\"human\", old_string=\"Their name is Alice\", new_string=\"Their name is Bob\")\n\n # Update a block containing a todo list\n memory_replace(label=\"todos\", old_string=\"- [ ] Step 5: Search the web\", new_string=\"- [x] Step 5: Search the web\")\n\n # Pass an empty string to\n memory_replace(label=\"human\", old_string=\"Their name is Alice\", new_string=\"\")\n\n # Bad example - do NOT add (view-only) line numbers to the args\n memory_replace(label=\"human\", old_string=\"Line 1: Their name is Alice\", new_string=\"Line 1: Their name is Bob\")\n\n # Bad example - do NOT include the number number warning either\n memory_replace(label=\"human\", old_string=\"# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\\nLine 1: Their name is Alice\", new_string=\"Line 1: Their name is Bob\")\n\n # Good example - no line numbers or line number warning (they are view-only), just the text\n memory_replace(label=\"human\", old_string=\"Their name is Alice\", new_string=\"Their name is Bob\")\n\n Returns:\n str: The success message", "parameters": { "type": "object", "properties": { @@ -635,19 +635,19 @@ "type": "string", "description": "Section of the memory to be edited, identified by its label." }, - "old_str": { + "old_string": { "type": "string", "description": "The text to replace (must match exactly, including whitespace and indentation)." }, - "new_str": { + "new_string": { "type": "string", "description": "The new text to insert in place of the old text. Do not include line number prefixes." 
} }, "required": [ "label", - "old_str", - "new_str" + "old_string", + "new_string" ] } }, diff --git a/tests/test_agent_serialization.py b/tests/test_agent_serialization.py index 5213076f..017eb00d 100644 --- a/tests/test_agent_serialization.py +++ b/tests/test_agent_serialization.py @@ -355,7 +355,7 @@ def compare_in_context_message_id_remapping(server, og_agent: AgentState, copy_a remapped IDs but identical relevant content and order. """ # Serialize the original agent state - result = server.agent_manager.serialize(agent_id=og_agent.id, actor=og_user) + server.agent_manager.serialize(agent_id=og_agent.id, actor=og_user) # Retrieve the in-context messages for both the original and the copy # Corrected typo: agent_id instead of agent_id diff --git a/tests/test_agent_serialization_v2.py b/tests/test_agent_serialization_v2.py index 8bc6f21b..2dd76545 100644 --- a/tests/test_agent_serialization_v2.py +++ b/tests/test_agent_serialization_v2.py @@ -774,7 +774,7 @@ class TestFileExport: @pytest.mark.asyncio async def test_basic_file_export(self, default_user, agent_serialization_manager, agent_with_files): """Test basic file export functionality""" - agent_id, source_id, file_id = agent_with_files + agent_id, _source_id, _file_id = agent_with_files exported = await agent_serialization_manager.export([agent_id], actor=default_user) @@ -925,7 +925,7 @@ class TestFileExport: @pytest.mark.asyncio async def test_file_content_inclusion_in_export(self, default_user, agent_serialization_manager, agent_with_files): """Test that file content is included in export""" - agent_id, source_id, file_id = agent_with_files + agent_id, _source_id, _file_id = agent_with_files exported = await agent_serialization_manager.export([agent_id], actor=default_user) @@ -1538,7 +1538,29 @@ class TestAgentFileEdgeCases: imported_agent_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id == "agent-0") imported_agent = await 
server.agent_manager.get_agent_by_id_async(imported_agent_id, other_user) - assert len(imported_agent.message_ids) == 0 + assert len(imported_agent.message_ids) == 1 + + async def test_init_with_no_messages_still_has_system_message(self, server, default_user): + """Test that _init_with_no_messages=True still creates a system message so context window doesn't crash.""" + create_agent_request = CreateAgent( + name="partially_initialized_agent", + system="Test system prompt", + llm_config=LLMConfig.default_config("gpt-4o-mini"), + embedding_config=EmbeddingConfig.default_config(provider="openai"), + initial_message_sequence=[], + ) + + agent_state = await server.agent_manager.create_agent_async( + agent_create=create_agent_request, + actor=default_user, + _init_with_no_messages=True, + ) + + assert agent_state.message_ids is not None + assert len(agent_state.message_ids) == 1 + + context_window = await server.agent_manager.get_context_window(agent_id=agent_state.id, actor=default_user) + assert context_window is not None async def test_large_agent_file(self, server, agent_serialization_manager, default_user, other_user, weather_tool): """Test handling of larger agent files with many messages.""" diff --git a/tests/test_client.py b/tests/test_client.py index 7c3b0d08..b50165a4 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -4,6 +4,7 @@ import threading import uuid from http.server import BaseHTTPRequestHandler, HTTPServer +import httpx import pytest from dotenv import load_dotenv from letta_client import APIError, Letta @@ -59,7 +60,7 @@ def mock_openai_server(): self.end_headers() self.wfile.write(body) - def do_GET(self): # noqa: N802 + def do_GET(self): # Support OpenAI model listing used during provider sync. 
if self.path in ("/v1/models", "/models"): self._send_json( @@ -77,7 +78,7 @@ def mock_openai_server(): self._send_json(404, {"error": {"message": f"Not found: {self.path}"}}) - def do_POST(self): # noqa: N802 + def do_POST(self): # Support embeddings endpoint if self.path not in ("/v1/embeddings", "/embeddings"): self._send_json(404, {"error": {"message": f"Not found: {self.path}"}}) @@ -167,7 +168,7 @@ def agent(client: Letta): agent_state = client.agents.create( name="test_client", memory_blocks=[{"label": "human", "value": ""}, {"label": "persona", "value": ""}], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) @@ -183,7 +184,7 @@ def search_agent_one(client: Letta): agent_state = client.agents.create( name="Search Agent One", memory_blocks=[{"label": "human", "value": ""}, {"label": "persona", "value": ""}], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) @@ -199,7 +200,7 @@ def search_agent_two(client: Letta): agent_state = client.agents.create( name="Search Agent Two", memory_blocks=[{"label": "human", "value": ""}, {"label": "persona", "value": ""}], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) @@ -236,7 +237,7 @@ def test_add_and_manage_tags_for_agent(client: Letta): # Step 0: create an agent with no tags agent = client.agents.create( memory_blocks=[], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) assert len(agent.tags) == 0 @@ -280,21 +281,21 @@ def test_agent_tags(client: Letta, clear_tables): agent1 = client.agents.create( name=f"test_agent_{str(uuid.uuid4())}", tags=["test", "agent1", "production"], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) 
agent2 = client.agents.create( name=f"test_agent_{str(uuid.uuid4())}", tags=["test", "agent2", "development"], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) agent3 = client.agents.create( name=f"test_agent_{str(uuid.uuid4())}", tags=["test", "agent3", "production"], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) @@ -347,14 +348,14 @@ def test_shared_blocks(disable_e2b_api_key, client: Letta): name="agent1", memory_blocks=[{"label": "persona", "value": "you are agent 1"}], block_ids=[block.id], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) agent_state2 = client.agents.create( name="agent2", memory_blocks=[{"label": "persona", "value": "you are agent 2"}], block_ids=[block.id], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) @@ -373,7 +374,7 @@ def test_update_agent_memory_label(client: Letta): """Test that we can update the label of a block in an agent's memory""" agent = client.agents.create( - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", memory_blocks=[{"label": "human", "value": ""}], ) @@ -425,7 +426,7 @@ def test_update_agent_memory_limit(client: Letta): """Test that we can update the limit of a block in an agent's memory""" agent = client.agents.create( - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", memory_blocks=[ {"label": "human", "value": "username: sarah", "limit": 1000}, @@ -484,7 +485,7 @@ def test_function_always_error(client: Letta): tool = client.tools.upsert_from_function(func=testing_method) agent = client.agents.create( - 
model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", memory_blocks=[ { @@ -686,7 +687,7 @@ def test_agent_creation(client: Letta): }, {"label": "persona", "value": "you are an assistant"}, ], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", tool_ids=[tool1.id, tool2.id], include_base_tools=False, @@ -725,7 +726,7 @@ def test_initial_sequence(client: Letta): # create an agent agent = client.agents.create( memory_blocks=[{"label": "human", "value": ""}, {"label": "persona", "value": ""}], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", initial_message_sequence=[ MessageCreateParam( @@ -738,7 +739,7 @@ def test_initial_sequence(client: Letta): # list messages messages = client.agents.messages.list(agent_id=agent.id).items - response = client.agents.messages.create( + client.agents.messages.create( agent_id=agent.id, messages=[ MessageCreateParam( @@ -757,7 +758,7 @@ def test_initial_sequence(client: Letta): # def test_timezone(client: Letta): # agent = client.agents.create( # memory_blocks=[{"label": "human", "value": ""}, {"label": "persona", "value": ""}], -# model="anthropic/claude-haiku-4-5-20251001", +# model="anthropic/claude-haiku-4-5", # embedding="openai/text-embedding-3-small", # timezone="America/Los_Angeles", # ) @@ -792,7 +793,7 @@ def test_initial_sequence(client: Letta): def test_attach_sleeptime_block(client: Letta): agent = client.agents.create( memory_blocks=[{"label": "human", "value": ""}, {"label": "persona", "value": ""}], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", enable_sleeptime=True, ) @@ -802,7 +803,7 @@ def test_attach_sleeptime_block(client: Letta): group_id = agent.multi_agent_group.id group = 
client.groups.retrieve(group_id=group_id) agent_ids = group.agent_ids - sleeptime_id = [id for id in agent_ids if id != agent.id][0] + sleeptime_id = next(id for id in agent_ids if id != agent.id) # attach a new block block = client.blocks.create(label="test", value="test") # , project_id="test") @@ -820,3 +821,207 @@ def test_attach_sleeptime_block(client: Letta): # cleanup client.agents.delete(agent.id) + + +# -------------------------------------------------------------------------------------------------------------------- +# Agent Generate Endpoint Tests +# -------------------------------------------------------------------------------------------------------------------- + + +def test_agent_generate_basic(client: Letta, agent: AgentState): + """Test basic generate endpoint with simple prompt.""" + response = httpx.post( + f"{client._client._base_url}/v1/agents/{agent.id}/generate", + json={"prompt": "What is 2+2?"}, + timeout=30.0, + ) + + # Verify successful response + assert response.status_code == 200, f"Expected 200, got {response.status_code}: {response.text}" + + response_data = response.json() + + # Verify response structure + assert response_data is not None + assert "content" in response_data + assert "model" in response_data + assert "usage" in response_data + + # Verify content is returned + assert response_data["content"] is not None + assert len(response_data["content"]) > 0 + assert isinstance(response_data["content"], str) + + # Verify model is set + assert response_data["model"] is not None + assert isinstance(response_data["model"], str) + + # Verify usage statistics + assert response_data["usage"] is not None + assert response_data["usage"]["total_tokens"] > 0 + assert response_data["usage"]["prompt_tokens"] > 0 + assert response_data["usage"]["completion_tokens"] > 0 + + +def test_agent_generate_with_system_prompt(client: Letta, agent: AgentState): + """Test generate endpoint with system prompt.""" + response = httpx.post( + 
f"{client._client._base_url}/v1/agents/{agent.id}/generate", + json={ + "prompt": "What is your role?", + "system_prompt": "You are a helpful math tutor who always responds with exactly 5 words.", + }, + timeout=30.0, + ) + + # Verify successful response + assert response.status_code == 200, f"Expected 200, got {response.status_code}: {response.text}" + + response_data = response.json() + + # Verify response + assert response_data is not None + assert response_data["content"] is not None + assert len(response_data["content"]) > 0 + + # Verify usage includes system prompt tokens + assert response_data["usage"]["prompt_tokens"] > 10 # Should include system prompt tokens + + +def test_agent_generate_with_model_override(client: Letta, agent: AgentState): + """Test generate endpoint with model override.""" + # Get the agent's current model + + # Use OpenAI model (more likely to be available in test environment) + override_model_handle = "openai/gpt-4o-mini" + + response = httpx.post( + f"{client._client._base_url}/v1/agents/{agent.id}/generate", + json={ + "prompt": "Say hello", + "override_model": override_model_handle, + }, + timeout=30.0, + ) + + # Verify successful response + assert response.status_code == 200, f"Expected 200, got {response.status_code}: {response.text}" + + response_data = response.json() + + # Verify response + assert response_data is not None + assert response_data["content"] is not None + + # Verify the override model was used (model name should be different from original) + # Note: The actual model name in response might be the full model name, not the handle + assert response_data["model"] is not None + + +def test_agent_generate_empty_prompt_error(client: Letta, agent: AgentState): + """Test that empty prompt returns validation error.""" + response = httpx.post( + f"{client._client._base_url}/v1/agents/{agent.id}/generate", + json={"prompt": ""}, # Empty prompt should fail validation + timeout=30.0, + ) + + # Verify it's a validation error 
(422) + assert response.status_code == 422, f"Expected 422, got {response.status_code}: {response.text}" + + +def test_agent_generate_whitespace_prompt_error(client: Letta, agent: AgentState): + """Test that whitespace-only prompt returns validation error.""" + response = httpx.post( + f"{client._client._base_url}/v1/agents/{agent.id}/generate", + json={"prompt": " \n\t "}, # Whitespace-only prompt should fail validation + timeout=30.0, + ) + + # Verify it's a validation error (422) + assert response.status_code == 422, f"Expected 422, got {response.status_code}: {response.text}" + + +def test_agent_generate_invalid_agent_id(client: Letta): + """Test that invalid agent ID returns 404.""" + # Use properly formatted agent ID that doesn't exist + fake_agent_id = "agent-00000000-0000-4000-8000-000000000000" + + response = httpx.post( + f"{client._client._base_url}/v1/agents/{fake_agent_id}/generate", + json={"prompt": "Hello"}, + timeout=30.0, + ) + + # Verify it's a not found error (404) + assert response.status_code == 404, f"Expected 404, got {response.status_code}: {response.text}" + assert "not found" in response.text.lower() + + +def test_agent_generate_invalid_model_override(client: Letta, agent: AgentState): + """Test that invalid model override returns 404.""" + response = httpx.post( + f"{client._client._base_url}/v1/agents/{agent.id}/generate", + json={ + "prompt": "Hello", + "override_model": "invalid/model-that-does-not-exist", + }, + timeout=30.0, + ) + + # Verify it's a not found error (404) + assert response.status_code == 404, f"Expected 404, got {response.status_code}: {response.text}" + assert "not found" in response.text.lower() or "not accessible" in response.text.lower() + + +def test_agent_generate_long_prompt(client: Letta, agent: AgentState): + """Test generate endpoint with a longer prompt.""" + # Create a longer prompt + long_prompt = " ".join(["This is a test sentence."] * 50) + + response = httpx.post( + 
f"{client._client._base_url}/v1/agents/{agent.id}/generate", + json={"prompt": long_prompt}, + timeout=30.0, + ) + + # Verify successful response + assert response.status_code == 200, f"Expected 200, got {response.status_code}: {response.text}" + + response_data = response.json() + + # Verify response + assert response_data is not None + assert response_data["content"] is not None + + # Verify token usage reflects the longer prompt + assert response_data["usage"]["prompt_tokens"] > 100 # Should have substantial prompt tokens + + +def test_agent_generate_no_persistence(client: Letta, agent: AgentState): + """Test that generate endpoint does not persist messages to agent.""" + # Get initial message count + initial_messages = client.agents.messages.list(agent_id=agent.id).items + initial_count = len(initial_messages) + + # Make a generate request + response = httpx.post( + f"{client._client._base_url}/v1/agents/{agent.id}/generate", + json={"prompt": "This should not be saved to agent memory"}, + timeout=30.0, + ) + + # Verify successful response + assert response.status_code == 200, f"Expected 200, got {response.status_code}: {response.text}" + + response_data = response.json() + + # Verify response was generated + assert response_data is not None + assert response_data["content"] is not None + + # Verify no new messages were added to the agent + final_messages = client.agents.messages.list(agent_id=agent.id).items + final_count = len(final_messages) + + assert final_count == initial_count, "Generate endpoint should not persist messages" diff --git a/tests/test_context_window_calculator.py b/tests/test_context_window_calculator.py new file mode 100644 index 00000000..2f44a963 --- /dev/null +++ b/tests/test_context_window_calculator.py @@ -0,0 +1,648 @@ +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from letta.services.context_window_calculator.context_window_calculator import ContextWindowCalculator + + +class TestExtractTagContent: + """Tests 
for the _extract_tag_content helper method""" + + def test_extracts_simple_tag(self): + text = "prefix content suffix" + result = ContextWindowCalculator._extract_tag_content(text, "tag") + assert result == "content" + + def test_returns_none_for_missing_tag(self): + text = "no tags here" + result = ContextWindowCalculator._extract_tag_content(text, "tag") + assert result is None + + def test_returns_none_for_missing_opening_tag(self): + text = "content" + result = ContextWindowCalculator._extract_tag_content(text, "tag") + assert result is None + + def test_returns_none_for_unclosed_tag(self): + text = "content without closing" + result = ContextWindowCalculator._extract_tag_content(text, "tag") + assert result is None + + def test_handles_multiline_content(self): + text = "\nline1\nline2\n" + result = ContextWindowCalculator._extract_tag_content(text, "tag") + assert result == "\nline1\nline2\n" + + def test_handles_nested_content(self): + text = "nested" + result = ContextWindowCalculator._extract_tag_content(text, "outer") + assert result == "nested" + + def test_handles_empty_content(self): + text = "" + result = ContextWindowCalculator._extract_tag_content(text, "tag") + assert result == "" + + def test_extracts_first_occurrence_with_duplicate_tags(self): + """When duplicate tags exist, only the first occurrence is extracted""" + text = "first some text second" + result = ContextWindowCalculator._extract_tag_content(text, "tag") + assert result == "first" + + +class TestExtractSystemComponents: + """Tests for the extract_system_components method""" + + def test_extracts_standard_agent_sections(self): + """Standard agent with base_instructions, memory_blocks, and memory_metadata""" + system_message = """ + +Base prompt here + + + +Core memory content + + + +Metadata here + +""" + result = ContextWindowCalculator.extract_system_components(system_message) + + assert result["system_prompt"] is not None + assert "" in result["system_prompt"] + assert "Base prompt 
here" in result["system_prompt"] + + assert result["core_memory"] is not None + assert "Core memory content" in result["core_memory"] + + assert result["external_memory_summary"] is not None + assert "" in result["external_memory_summary"] + + # These should be None for standard agent + assert result["memory_filesystem"] is None + assert result["tool_usage_rules"] is None + assert result["directories"] is None + + def test_extracts_git_enabled_agent_sections(self): + """Git-enabled agent has top-level memory_filesystem OUTSIDE memory_blocks""" + system_message = ( + "Base\n" + "\n" + "memory/\n" + " system/\n" + " human.md (100 chars)\n" + "\n" + "Meta" + ) + result = ContextWindowCalculator.extract_system_components(system_message) + + assert result["core_memory"] is None # git-enabled agents don't use + assert result["memory_filesystem"] is not None + assert "memory/" in result["memory_filesystem"] + assert "human.md" in result["memory_filesystem"] + + def test_extracts_tool_usage_rules(self): + """Agent with tool usage rules configured""" + system_message = """ +Base +Memory + +You must use tools in a specific order. 
+ +Meta +""" + result = ContextWindowCalculator.extract_system_components(system_message) + + assert result["tool_usage_rules"] is not None + assert "specific order" in result["tool_usage_rules"] + + def test_extracts_directories(self): + """Agent with attached sources has directories section""" + system_message = """ +Base +Memory + + + +README content + + + +Meta +""" + result = ContextWindowCalculator.extract_system_components(system_message) + + assert result["directories"] is not None + assert '' in result["directories"] + assert "readme.md" in result["directories"] + + def test_handles_react_agent_no_memory_blocks(self): + """React/workflow agents don't render """ + system_message = """ +React agent base + +Some directory content + +Meta +""" + result = ContextWindowCalculator.extract_system_components(system_message) + + assert result["system_prompt"] is not None + assert result["core_memory"] is None # No memory_blocks for react agents + assert result["directories"] is not None + assert result["external_memory_summary"] is not None + + def test_handles_all_sections_present(self): + """Full agent with all optional sections""" + system_message = """ +Base instructions +Memory blocks content +Filesystem tree +Tool rules +Directories content +Metadata +""" + result = ContextWindowCalculator.extract_system_components(system_message) + + assert result["system_prompt"] is not None + assert result["core_memory"] is not None + assert result["memory_filesystem"] is not None + assert result["tool_usage_rules"] is not None + assert result["directories"] is not None + assert result["external_memory_summary"] is not None + + def test_handles_empty_string(self): + """Empty input returns all None values""" + result = ContextWindowCalculator.extract_system_components("") + assert all(v is None for v in result.values()) + + def test_returns_correct_dict_keys(self): + """Verify the returned dict has all expected keys""" + result = 
ContextWindowCalculator.extract_system_components("") + expected_keys = { + "system_prompt", + "core_memory", + "memory_filesystem", + "tool_usage_rules", + "directories", + "external_memory_summary", + } + assert set(result.keys()) == expected_keys + + def test_no_base_instructions_tag_extracts_preamble(self): + """Custom system prompts without should extract preamble text""" + system_message = ( + "You are a helpful AI agent.\n" + "Use the tools available to you.\n\n" + "\n" + "My name is Letta.\n" + "\n\n" + "Metadata here" + ) + result = ContextWindowCalculator.extract_system_components(system_message) + + assert result["system_prompt"] is not None + assert "helpful AI agent" in result["system_prompt"] + assert "Use the tools" in result["system_prompt"] + # Should NOT include memory_blocks content + assert "" not in result["system_prompt"] + assert "" not in result["system_prompt"] + + assert result["core_memory"] is not None + assert result["external_memory_summary"] is not None + + def test_nested_memory_filesystem_not_extracted_as_top_level(self): + """memory_filesystem block INSIDE memory_blocks should NOT be extracted as top-level""" + system_message = ( + "You are a self-improving AI agent.\n\n" + "\n" + "The following memory blocks are currently engaged:\n\n" + "\n" + "\n" + "/memory/\n" + "\u251c\u2500\u2500 system/\n" + "\u2502 \u251c\u2500\u2500 human.md\n" + "\u2502 \u2514\u2500\u2500 persona.md\n" + "\n" + "\n\n" + "My name is Letta.\n" + "\n\n" + "Metadata" + ) + result = ContextWindowCalculator.extract_system_components(system_message) + + # memory_filesystem is nested inside memory_blocks - should NOT be extracted + assert result["memory_filesystem"] is None + + # core_memory should include the full memory_blocks content (including the nested filesystem) + assert result["core_memory"] is not None + assert "" in result["core_memory"] + assert "human.md" in result["core_memory"] + + def test_top_level_memory_filesystem_outside_memory_blocks(self): 
+ """Top-level memory_filesystem (git-enabled) rendered BEFORE memory_blocks is extracted""" + system_message = ( + "Base\n" + "\n" + "\u251c\u2500\u2500 system/\n" + "\u2502 \u2514\u2500\u2500 human.md\n" + "\n\n" + "\n---\ndescription: About the human\n---\nName: Alice\n\n\n" + "Meta" + ) + result = ContextWindowCalculator.extract_system_components(system_message) + + # This memory_filesystem is top-level (no memory_blocks container) + assert result["memory_filesystem"] is not None + assert "human.md" in result["memory_filesystem"] + + # Bare file blocks after are captured as core_memory + assert result["core_memory"] is not None + assert "" in result["core_memory"] + assert "Name: Alice" in result["core_memory"] + + def test_letta_code_agent_real_format(self): + """Real-world Letta Code agent format: no base_instructions, nested memory_filesystem""" + system_message = ( + "You are a self-improving AI agent with advanced memory.\n" + "You are connected to an interactive CLI tool.\n\n" + "# Memory\n" + "You have an advanced memory system.\n\n" + "\n" + "The following memory blocks are currently engaged:\n\n" + "\n" + "Filesystem view\n" + "\n" + "/memory/\n" + "\u251c\u2500\u2500 system/\n" + "\u2502 \u251c\u2500\u2500 human.md\n" + "\u2502 \u2514\u2500\u2500 persona.md\n" + "\n" + "\n\n" + "\n" + "My name is Letta Code.\n" + "\n\n" + "\n" + "Name: Jin Peng\n" + "\n" + "\n\n" + "\n" + "- The current system date is: February 10, 2026\n" + "- 9663 previous messages in recall memory\n" + "" + ) + result = ContextWindowCalculator.extract_system_components(system_message) + + # System prompt: preamble before + assert result["system_prompt"] is not None + assert "self-improving AI agent" in result["system_prompt"] + assert "advanced memory system" in result["system_prompt"] + assert "" not in result["system_prompt"] + + # Core memory: the full section + assert result["core_memory"] is not None + assert "Letta Code" in result["core_memory"] + assert "Jin Peng" in 
result["core_memory"] + + # memory_filesystem is NESTED inside memory_blocks - should NOT be extracted + assert result["memory_filesystem"] is None + + # No tool_usage_rules or directories + assert result["tool_usage_rules"] is None + assert result["directories"] is None + + # External memory summary + assert result["external_memory_summary"] is not None + assert "February 10, 2026" in result["external_memory_summary"] + + def test_git_enabled_agent_bare_file_blocks_captured_as_core_memory(self): + """Git-enabled agents render bare file blocks after — these must be captured as core_memory""" + system_message = ( + "Base\n" + "\n" + "\u251c\u2500\u2500 system/\n" + "\u2502 \u251c\u2500\u2500 human.md\n" + "\u2502 \u2514\u2500\u2500 persona.md\n" + "\n\n" + "\n---\ndescription: About the human\nlimit: 2000\n---\nName: Alice\n\n\n" + "\n---\ndescription: Agent persona\n---\nI am a helpful assistant.\n\n\n" + "Always call send_message to respond.\n" + "Meta" + ) + result = ContextWindowCalculator.extract_system_components(system_message) + + # memory_filesystem should preserve tree connectors with deterministic ordering + assert result["memory_filesystem"] is not None + assert "\u251c\u2500\u2500 system/" in result["memory_filesystem"] + + # core_memory should capture the bare file blocks + assert result["core_memory"] is not None + assert "" in result["core_memory"] + assert "Name: Alice" in result["core_memory"] + assert "" in result["core_memory"] + assert "helpful assistant" in result["core_memory"] + + # tool_usage_rules should NOT be included in core_memory + assert "" not in result["core_memory"] + + # Other sections + assert result["tool_usage_rules"] is not None + assert result["external_memory_summary"] is not None + + def test_git_enabled_agent_no_bare_blocks(self): + """Git-enabled agent with no file blocks after memory_filesystem returns None for core_memory""" + system_message = ( + "Base\n" + "\n" + "\u251c\u2500\u2500 system/\n" + "\n" + "Meta" + ) + 
result = ContextWindowCalculator.extract_system_components(system_message) + assert result["memory_filesystem"] is not None + assert result["core_memory"] is None + + def test_extract_top_level_tag_dual_occurrence_nested_first(self): + """When a tag appears nested first and top-level later, the top-level one is extracted""" + system_message = ( + "\n" + "nested rules\n" + "\n\n" + "top-level rules" + ) + result = ContextWindowCalculator._extract_top_level_tag(system_message, "tool_usage_rules") + assert result is not None + assert "top-level rules" in result + assert "nested rules" not in result + + def test_extract_system_prompt_pure_text_no_tags(self): + """System message with no section tags at all returns the full text as system_prompt""" + system_message = "You are a simple agent.\nYou help the user with tasks." + result = ContextWindowCalculator._extract_system_prompt(system_message) + assert result is not None + assert "simple agent" in result + assert "help the user" in result + + def test_git_backed_memory_without_memory_blocks_wrapper(self): + """Regression test from main: git-backed agents without wrapper""" + system_message = """You are some system prompt. 
+ + +Memory Directory: ~/.letta/agents/agent-123/memory + +/memory/ +\u2514\u2500\u2500 system/ + \u2514\u2500\u2500 human.md + + + +--- +description: test +limit: 10 +--- +hello + + + +- foo=bar + +""" + result = ContextWindowCalculator.extract_system_components(system_message) + + assert "You are some system prompt" in result["system_prompt"] + # memory_filesystem is a top-level section + assert result["memory_filesystem"] is not None + assert "" in result["memory_filesystem"] + # bare file blocks are captured as core_memory + assert result["core_memory"] is not None + assert "" in result["core_memory"] + assert result["external_memory_summary"].startswith("") + + def test_legacy_memory_blocks_wrapper(self): + """Regression test from main: legacy memory_blocks wrapper is properly parsed""" + system_message = """SYS + + +p + + + +- x=y + +""" + result = ContextWindowCalculator.extract_system_components(system_message) + + assert result["system_prompt"].startswith("") + assert result["core_memory"].startswith("") + assert result["external_memory_summary"].startswith("") + + +def _make_system_message(text: str): + """Helper to create a real Message object for use as a system message in tests.""" + from letta.schemas.enums import MessageRole + from letta.schemas.letta_message_content import TextContent + from letta.schemas.message import Message + + return Message(role=MessageRole.system, content=[TextContent(text=text)]) + + +def _make_mock_deps(system_text: str): + """Helper to create mocked token_counter, message_manager, and agent_state.""" + token_counter = MagicMock() + token_counter.count_text_tokens = AsyncMock(side_effect=lambda text: len(text) if text else 0) + token_counter.count_message_tokens = AsyncMock(return_value=0) + token_counter.count_tool_tokens = AsyncMock(return_value=0) + token_counter.convert_messages = MagicMock(return_value=[{"role": "system", "content": system_text}]) + + message_manager = MagicMock() + 
message_manager.get_messages_by_ids_async = AsyncMock(return_value=[]) + + agent_state = MagicMock() + agent_state.id = "agent-test" + agent_state.message_ids = ["msg-sys"] + agent_state.system = "fallback system prompt" + agent_state.tools = [] + agent_state.llm_config.context_window = 128000 + + actor = MagicMock() + + return token_counter, message_manager, agent_state, actor + + +class TestCalculateContextWindow: + """Integration tests for calculate_context_window with mocked dependencies""" + + @pytest.mark.asyncio + async def test_calculate_context_window_standard_agent(self): + """Test full context window calculation with a standard system message""" + system_text = ( + "You are a helpful agent.\n" + "human: User is Alice\n" + "Archival: 5 passages" + ) + + system_msg = _make_system_message(system_text) + token_counter, message_manager, agent_state, actor = _make_mock_deps(system_text) + + calculator = ContextWindowCalculator() + result = await calculator.calculate_context_window( + agent_state=agent_state, + actor=actor, + token_counter=token_counter, + message_manager=message_manager, + system_message_compiled=system_msg, + num_archival_memories=5, + num_messages=10, + message_ids=[], + ) + + assert result.context_window_size_max == 128000 + assert result.num_archival_memory == 5 + assert result.num_recall_memory == 10 + assert result.num_tokens_system > 0 + assert "helpful agent" in result.system_prompt + assert result.num_tokens_core_memory > 0 + assert "User is Alice" in result.core_memory + assert result.num_tokens_external_memory_summary > 0 + + # New sections should be None/0 since not in system message + assert result.memory_filesystem is None + assert result.num_tokens_memory_filesystem == 0 + assert result.tool_usage_rules is None + assert result.num_tokens_tool_usage_rules == 0 + assert result.directories is None + assert result.num_tokens_directories == 0 + + @pytest.mark.asyncio + async def 
test_calculate_context_window_skips_empty_sections(self): + """Verify that token counting is skipped for empty/missing sections""" + # Only base_instructions, no other sections + system_text = "Simple agent" + + system_msg = _make_system_message(system_text) + token_counter, message_manager, agent_state, actor = _make_mock_deps(system_text) + + calculator = ContextWindowCalculator() + await calculator.calculate_context_window( + agent_state=agent_state, + actor=actor, + token_counter=token_counter, + message_manager=message_manager, + system_message_compiled=system_msg, + num_archival_memories=0, + num_messages=0, + message_ids=[], + ) + + # count_text_tokens should only be called for system_prompt (non-empty) + # and NOT for core_memory, memory_filesystem, tool_usage_rules, directories, + # external_memory_summary, or summary_memory (all empty/None) + calls = token_counter.count_text_tokens.call_args_list + assert len(calls) == 1, f"Expected 1 call to count_text_tokens (system_prompt only), got {len(calls)}: {calls}" + + @pytest.mark.asyncio + async def test_calculate_context_window_all_sections(self): + """Test with all optional sections present""" + system_text = ( + "Agent instructions\n" + "Core memory\n" + "\u251c\u2500\u2500 system/\n\u2502 \u2514\u2500\u2500 human.md\n" + "Always call search first\n" + 'content\n' + "Archival: 10 passages" + ) + + system_msg = _make_system_message(system_text) + token_counter, message_manager, agent_state, actor = _make_mock_deps(system_text) + + calculator = ContextWindowCalculator() + result = await calculator.calculate_context_window( + agent_state=agent_state, + actor=actor, + token_counter=token_counter, + message_manager=message_manager, + system_message_compiled=system_msg, + num_archival_memories=10, + num_messages=5, + message_ids=[], + ) + + # All sections should be populated + assert result.num_tokens_system > 0 + assert result.num_tokens_core_memory > 0 + assert result.num_tokens_memory_filesystem > 0 + assert 
result.memory_filesystem is not None + assert result.num_tokens_tool_usage_rules > 0 + assert result.tool_usage_rules is not None + assert result.num_tokens_directories > 0 + assert result.directories is not None + assert result.num_tokens_external_memory_summary > 0 + + # Verify total is sum of all parts + expected_total = ( + result.num_tokens_system + + result.num_tokens_core_memory + + result.num_tokens_memory_filesystem + + result.num_tokens_tool_usage_rules + + result.num_tokens_directories + + result.num_tokens_external_memory_summary + + result.num_tokens_summary_memory + + result.num_tokens_messages + + result.num_tokens_functions_definitions + ) + assert result.context_window_size_current == expected_total + + @pytest.mark.asyncio + async def test_calculate_context_window_git_enabled_agent(self): + """Test that git-enabled agents capture bare file blocks as core_memory""" + system_text = ( + "Git agent\n" + "\n" + "\u251c\u2500\u2500 system/\n" + "\u2502 \u251c\u2500\u2500 human.md\n" + "\u2502 \u2514\u2500\u2500 persona.md\n" + "\n\n" + "\n---\ndescription: About the human\n---\nName: Alice\n\n\n" + "\n---\ndescription: Agent persona\n---\nI am helpful.\n\n\n" + "Archival: 3 passages" + ) + + system_msg = _make_system_message(system_text) + token_counter, message_manager, agent_state, actor = _make_mock_deps(system_text) + + calculator = ContextWindowCalculator() + result = await calculator.calculate_context_window( + agent_state=agent_state, + actor=actor, + token_counter=token_counter, + message_manager=message_manager, + system_message_compiled=system_msg, + num_archival_memories=3, + num_messages=5, + message_ids=[], + ) + + # memory_filesystem should capture the tree view + assert result.memory_filesystem is not None + assert result.num_tokens_memory_filesystem > 0 + + # core_memory should capture the bare file blocks + assert result.num_tokens_core_memory > 0 + assert "Name: Alice" in result.core_memory + assert "" in result.core_memory + + # Total 
should include all sections + expected_total = ( + result.num_tokens_system + + result.num_tokens_core_memory + + result.num_tokens_memory_filesystem + + result.num_tokens_tool_usage_rules + + result.num_tokens_directories + + result.num_tokens_external_memory_summary + + result.num_tokens_summary_memory + + result.num_tokens_messages + + result.num_tokens_functions_definitions + ) + assert result.context_window_size_current == expected_total diff --git a/tests/test_crypto_utils.py b/tests/test_crypto_utils.py index 6ceabdd5..259548be 100644 --- a/tests/test_crypto_utils.py +++ b/tests/test_crypto_utils.py @@ -1,7 +1,5 @@ import base64 import json -import os -from unittest.mock import patch import pytest diff --git a/tests/test_exception_logging.py b/tests/test_exception_logging.py index 2fa952cc..6907ec7d 100644 --- a/tests/test_exception_logging.py +++ b/tests/test_exception_logging.py @@ -3,13 +3,11 @@ Tests for global exception logging system. """ import asyncio -import logging -from unittest.mock import MagicMock, patch +from unittest.mock import patch import pytest -from fastapi import FastAPI, Request +from fastapi import FastAPI from fastapi.testclient import TestClient -from starlette.middleware.base import BaseHTTPMiddleware from letta.exceptions.logging import add_exception_context, log_and_raise, log_exception from letta.server.rest_api.middleware.logging import LoggingMiddleware diff --git a/tests/test_google_embeddings.py b/tests/test_google_embeddings.py index 5f879933..3304135b 100644 --- a/tests/test_google_embeddings.py +++ b/tests/test_google_embeddings.py @@ -2,14 +2,13 @@ import httpx import pytest from dotenv import load_dotenv -from letta.embeddings import GoogleEmbeddings # Adjust the import based on your module structure +from letta.embeddings import GoogleEmbeddings # type: ignore[import-untyped] # Adjust the import based on your module structure load_dotenv() import os import threading import time -import pytest from letta_client import 
CreateBlock, Letta as LettaSDKClient, MessageCreate SERVER_PORT = 8283 diff --git a/tests/test_google_schema_refs.py b/tests/test_google_schema_refs.py new file mode 100644 index 00000000..ff10e1cc --- /dev/null +++ b/tests/test_google_schema_refs.py @@ -0,0 +1,178 @@ +"""Unit tests for GoogleVertexClient._resolve_json_schema_refs and $ref safety net.""" + +import pytest + +from letta.llm_api.google_vertex_client import GoogleVertexClient + + +@pytest.fixture +def client(): + return GoogleVertexClient() + + +class TestResolveJsonSchemaRefs: + def test_single_def_with_ref(self, client): + schema = { + "type": "object", + "properties": { + "status": {"$ref": "#/$defs/StatusEnum"}, + }, + "$defs": { + "StatusEnum": {"type": "string", "enum": ["active", "inactive"]}, + }, + } + result = client._resolve_json_schema_refs(schema) + assert "$defs" not in result + assert result["properties"]["status"] == {"type": "string", "enum": ["active", "inactive"]} + + def test_multiple_defs(self, client): + schema = { + "type": "object", + "properties": { + "ticket": {"$ref": "#/$defs/TicketStatus"}, + "report": {"$ref": "#/$defs/ReportType"}, + }, + "$defs": { + "TicketStatus": {"type": "string", "enum": ["open", "closed"]}, + "ReportType": {"type": "string", "enum": ["summary", "detailed"]}, + }, + } + result = client._resolve_json_schema_refs(schema) + assert "$defs" not in result + assert result["properties"]["ticket"] == {"type": "string", "enum": ["open", "closed"]} + assert result["properties"]["report"] == {"type": "string", "enum": ["summary", "detailed"]} + + def test_nested_ref_in_def(self, client): + schema = { + "type": "object", + "properties": { + "order": {"$ref": "#/$defs/Order"}, + }, + "$defs": { + "Order": { + "type": "object", + "properties": { + "status": {"$ref": "#/$defs/OrderStatus"}, + }, + }, + "OrderStatus": {"type": "string", "enum": ["pending", "shipped"]}, + }, + } + result = client._resolve_json_schema_refs(schema) + assert "$defs" not in result + 
assert result["properties"]["order"]["properties"]["status"] == {"type": "string", "enum": ["pending", "shipped"]} + + def test_ref_inside_anyof(self, client): + schema = { + "type": "object", + "properties": { + "value": { + "anyOf": [ + {"$ref": "#/$defs/StringVal"}, + {"type": "null"}, + ] + }, + }, + "$defs": { + "StringVal": {"type": "string", "maxLength": 100}, + }, + } + result = client._resolve_json_schema_refs(schema) + assert "$defs" not in result + assert result["properties"]["value"]["anyOf"][0] == {"type": "string", "maxLength": 100} + assert result["properties"]["value"]["anyOf"][1] == {"type": "null"} + + def test_ref_inside_allof(self, client): + schema = { + "type": "object", + "properties": { + "item": {"allOf": [{"$ref": "#/$defs/Base"}, {"type": "object", "properties": {"extra": {"type": "string"}}}]}, + }, + "$defs": { + "Base": {"type": "object", "properties": {"name": {"type": "string"}}}, + }, + } + result = client._resolve_json_schema_refs(schema) + assert result["properties"]["item"]["allOf"][0] == {"type": "object", "properties": {"name": {"type": "string"}}} + + def test_no_defs_is_noop(self, client): + schema = { + "type": "object", + "properties": { + "name": {"type": "string"}, + }, + } + result = client._resolve_json_schema_refs(schema) + assert result == schema + + def test_definitions_key(self, client): + schema = { + "type": "object", + "properties": { + "role": {"$ref": "#/definitions/Role"}, + }, + "definitions": { + "Role": {"type": "string", "enum": ["admin", "user"]}, + }, + } + result = client._resolve_json_schema_refs(schema) + assert "definitions" not in result + assert result["properties"]["role"] == {"type": "string", "enum": ["admin", "user"]} + + def test_unresolvable_ref_logged(self, client): + schema = { + "type": "object", + "properties": { + "thing": {"$ref": "#/properties/other/nested"}, + }, + } + result = client._resolve_json_schema_refs(schema) + assert "$ref" in result["properties"]["thing"] + + def 
test_ref_in_array_items(self, client): + schema = { + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": {"$ref": "#/$defs/Tag"}, + }, + }, + "$defs": { + "Tag": {"type": "string", "enum": ["urgent", "low"]}, + }, + } + result = client._resolve_json_schema_refs(schema) + assert "$defs" not in result + assert result["properties"]["tags"]["items"] == {"type": "string", "enum": ["urgent", "low"]} + + +class TestCleanSchemaStripsUnresolvedRefs: + def test_ref_stripped_by_cleaner(self, client): + schema = { + "type": "object", + "properties": { + "thing": {"$ref": "#/properties/other/nested", "type": "string"}, + }, + } + client._clean_google_ai_schema_properties(schema) + assert "$ref" not in schema["properties"]["thing"] + assert schema["properties"]["thing"]["type"] == "string" + + def test_full_pipeline_resolves_then_cleans(self, client): + schema = { + "type": "object", + "properties": { + "status": {"$ref": "#/$defs/Status"}, + "weird": {"$ref": "#/properties/foo/bar", "type": "string"}, + }, + "$defs": { + "Status": {"type": "string", "enum": ["a", "b"], "default": "a"}, + }, + } + resolved = client._resolve_json_schema_refs(schema) + client._clean_google_ai_schema_properties(resolved) + assert "$defs" not in resolved + assert "$ref" not in resolved["properties"]["weird"] + assert resolved["properties"]["status"]["enum"] == ["a", "b"] + assert "default" not in resolved["properties"]["status"] diff --git a/tests/test_internal_agents_count.py b/tests/test_internal_agents_count.py index 534296a0..34997a64 100644 --- a/tests/test_internal_agents_count.py +++ b/tests/test_internal_agents_count.py @@ -1,4 +1,3 @@ -import os from typing import List import httpx @@ -57,7 +56,7 @@ def test_agents(client: Letta) -> List[AgentState]: for agent in agents: try: client.agents.delete(agent.id) - except: + except Exception: pass diff --git a/tests/test_letta_agent_batch.py b/tests/test_letta_agent_batch.py index c2f5b718..edd1ccc3 100644 --- 
a/tests/test_letta_agent_batch.py +++ b/tests/test_letta_agent_batch.py @@ -1,4 +1,5 @@ import asyncio +import itertools from datetime import datetime, timezone from typing import Tuple from unittest.mock import AsyncMock, patch @@ -36,7 +37,7 @@ from tests.utils import create_tool_from_func # Model identifiers used in tests MODELS = { "sonnet": "anthropic/claude-sonnet-4-20250514", - "haiku": "anthropic/claude-haiku-4-5-20251001", + "haiku": "anthropic/claude-haiku-4-5", "opus": "anthropic/claude-opus-4-1-20250805", } @@ -769,7 +770,7 @@ def _assert_descending_order(messages): if len(messages) <= 1: return True - for prev, next in zip(messages[:-1], messages[1:]): + for prev, next in itertools.pairwise(messages): assert prev.created_at >= next.created_at, ( f"Order violation: {prev.id} ({prev.created_at}) followed by {next.id} ({next.created_at})" ) diff --git a/tests/test_llm_clients.py b/tests/test_llm_clients.py index 7ceb278c..54e97284 100644 --- a/tests/test_llm_clients.py +++ b/tests/test_llm_clients.py @@ -101,7 +101,7 @@ async def test_send_llm_batch_request_async_mismatched_keys(anthropic_client, mo a ValueError is raised. """ mismatched_tools = {"agent-2": []} # Different agent ID than in the messages mapping. 
- with pytest.raises(ValueError, match="Agent mappings for messages and tools must use the same agent_ids."): + with pytest.raises(ValueError, match=r"Agent mappings for messages and tools must use the same agent_ids."): await anthropic_client.send_llm_batch_request_async( AgentType.memgpt_agent, mock_agent_messages, mismatched_tools, mock_agent_llm_config ) diff --git a/tests/test_log_context.py b/tests/test_log_context.py index 45ae3a93..3af18f84 100644 --- a/tests/test_log_context.py +++ b/tests/test_log_context.py @@ -2,8 +2,6 @@ import json import logging from io import StringIO -import pytest - from letta.log import JSONFormatter, LogContextFilter from letta.log_context import clear_log_context, get_log_context, remove_log_context, set_log_context, update_log_context diff --git a/tests/test_log_context_middleware.py b/tests/test_log_context_middleware.py index 546f169f..bf6e650e 100644 --- a/tests/test_log_context_middleware.py +++ b/tests/test_log_context_middleware.py @@ -1,7 +1,10 @@ +from unittest.mock import patch + import pytest from fastapi import FastAPI from fastapi.testclient import TestClient +import letta.server.rest_api.routers.v1.git_http as git_http_router from letta.log_context import get_log_context from letta.server.rest_api.middleware import LoggingMiddleware @@ -35,6 +38,64 @@ def client(app): class TestLogContextMiddleware: + @pytest.mark.asyncio + async def test_sync_after_push_syncs_nested_block_labels_to_postgres(self, monkeypatch): + """Regression test: nested labels (e.g., system/human) are synced from git files.""" + + synced_calls = [] + + class DummyActor: + id = "user-123" + organization_id = "org-123" + + class DummyGit: + async def get_files(self, agent_id, org_id, ref): + assert ref == "HEAD" + return { + "system/human.md": "---\ndescription: human\nlimit: 20000\n---\nname: sarah", + "system/persona.md": "---\ndescription: persona\nlimit: 20000\n---\nbe helpful", + } + + class DummyMemoryRepoManager: + git = DummyGit() + + 
class DummyBlockManager: + async def _sync_block_to_postgres(self, **kwargs): + synced_calls.append(kwargs) + + class DummyAgentManager: + async def list_agent_blocks_async(self, **kwargs): + return [] + + class DummyUserManager: + async def get_actor_by_id_async(self, actor_id): + return DummyActor() + + class DummyServer: + user_manager = DummyUserManager() + memory_repo_manager = DummyMemoryRepoManager() + block_manager = DummyBlockManager() + agent_manager = DummyAgentManager() + + class DummyGitEnabledBlockManager(DummyBlockManager): + pass + + dummy_server = DummyServer() + dummy_server.block_manager = DummyGitEnabledBlockManager() + + monkeypatch.setattr(git_http_router, "_server_instance", dummy_server) + + from letta.settings import settings as core_settings + + monkeypatch.setattr(core_settings, "memfs_service_url", "http://memfs.test") + + with patch("letta.services.block_manager_git.GitEnabledBlockManager", DummyGitEnabledBlockManager): + await git_http_router._sync_after_push(actor_id="user-123", agent_id="agent-123") + + labels = {call["label"] for call in synced_calls} + assert "system/human" in labels + assert "system/persona" in labels + def test_extracts_actor_id_from_headers(self, client): response = client.get("/v1/agents/agent-123e4567-e89b-42d3-8456-426614174000", headers={"user_id": "user-abc123"}) assert response.status_code == 200 diff --git a/tests/test_mcp_encryption.py b/tests/test_mcp_encryption.py index e8c9c0f1..d37d6931 100644 --- a/tests/test_mcp_encryption.py +++ b/tests/test_mcp_encryption.py @@ -4,9 +4,8 @@ Tests the end-to-end encryption functionality in the MCP manager. 
""" import json -import os from datetime import datetime, timezone -from unittest.mock import AsyncMock, Mock, patch +from unittest.mock import AsyncMock, patch from uuid import uuid4 import pytest @@ -20,13 +19,9 @@ from letta.schemas.mcp import ( MCPOAuthSessionUpdate, MCPServer as PydanticMCPServer, MCPServerType, - SSEServerConfig, - StdioServerConfig, ) -from letta.schemas.secret import Secret from letta.server.db import db_registry from letta.server.server import SyncServer -from letta.services.mcp_manager import MCPManager from letta.settings import settings diff --git a/tests/test_memory.py b/tests/test_memory.py index 26df7e41..7c83bebe 100644 --- a/tests/test_memory.py +++ b/tests/test_memory.py @@ -223,3 +223,33 @@ def test_current_files_open_counts_truthy_only(): m = Memory(agent_type=AgentType.react_agent, blocks=[], file_blocks=[fb1, fb2, fb3]) out = m.compile(sources=[src], max_files_open=10) assert "- current_files_open=1" in out + + +def test_compile_git_memory_filesystem_handles_leaf_directory_collisions(): + """Git memory filesystem rendering should tolerate label prefix collisions. + + Example collisions: + - leaf at "system" and children under "system/..." + - leaf at "system/human" and children under "system/human/..." + + These occur naturally in git-backed memory where both index-like blocks and + nested blocks can exist. + """ + + m = Memory( + agent_type=AgentType.letta_v1_agent, + git_enabled=True, + blocks=[ + Block(label="system", value="root", limit=100), + Block(label="system/human", value="human index", limit=100), + Block(label="system/human/context", value="context", limit=100), + ], + ) + + out = m.compile() + + # Should include the filesystem view and not raise. 
+ assert "" in out + assert "system/" in out + assert "system.md" in out + assert "human.md" in out diff --git a/tests/test_minimax_client.py b/tests/test_minimax_client.py index 32ac1d61..1e596fa3 100644 --- a/tests/test_minimax_client.py +++ b/tests/test_minimax_client.py @@ -4,10 +4,13 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from letta.llm_api.minimax_client import MINIMAX_BASE_URL, MiniMaxClient +from letta.llm_api.minimax_client import MiniMaxClient from letta.schemas.enums import AgentType from letta.schemas.llm_config import LLMConfig +# MiniMax API base URL +MINIMAX_BASE_URL = "https://api.minimax.io/anthropic" + class TestMiniMaxClient: """Tests for MiniMaxClient.""" @@ -55,7 +58,7 @@ class TestMiniMaxClient: # Mock BYOK to return no override self.client.get_byok_overrides = MagicMock(return_value=(None, None, None)) - client = self.client._get_anthropic_client(self.llm_config, async_client=False) + self.client._get_anthropic_client(self.llm_config, async_client=False) mock_anthropic.Anthropic.assert_called_once_with( api_key="test-api-key", @@ -73,7 +76,7 @@ class TestMiniMaxClient: # Mock BYOK to return no override self.client.get_byok_overrides = MagicMock(return_value=(None, None, None)) - client = self.client._get_anthropic_client(self.llm_config, async_client=True) + self.client._get_anthropic_client(self.llm_config, async_client=True) mock_anthropic.AsyncAnthropic.assert_called_once_with( api_key="test-api-key", @@ -100,7 +103,7 @@ class TestMiniMaxClientTemperatureClamping: """Verify build_request_data is called for temperature clamping.""" # This is a basic test to ensure the method exists and can be called mock_build.return_value = {"temperature": 0.7} - result = self.client.build_request_data( + self.client.build_request_data( agent_type=AgentType.letta_v1_agent, messages=[], llm_config=self.llm_config, @@ -214,7 +217,7 @@ class TestMiniMaxClientUsesNonBetaAPI: mock_anthropic_client.messages.create.return_value = 
mock_response mock_get_client.return_value = mock_anthropic_client - result = client.request({"model": "MiniMax-M2.1"}, llm_config) + client.request({"model": "MiniMax-M2.1"}, llm_config) # Verify messages.create was called (not beta.messages.create) mock_anthropic_client.messages.create.assert_called_once() @@ -239,7 +242,7 @@ class TestMiniMaxClientUsesNonBetaAPI: mock_anthropic_client.messages.create.return_value = mock_response mock_get_client.return_value = mock_anthropic_client - result = await client.request_async({"model": "MiniMax-M2.1"}, llm_config) + await client.request_async({"model": "MiniMax-M2.1"}, llm_config) # Verify messages.create was called (not beta.messages.create) mock_anthropic_client.messages.create.assert_called_once() @@ -261,7 +264,7 @@ class TestMiniMaxClientUsesNonBetaAPI: mock_anthropic_client.messages.create.return_value = mock_stream mock_get_client.return_value = mock_anthropic_client - result = await client.stream_async({"model": "MiniMax-M2.1"}, llm_config) + await client.stream_async({"model": "MiniMax-M2.1"}, llm_config) # Verify messages.create was called (not beta.messages.create) mock_anthropic_client.messages.create.assert_called_once() diff --git a/tests/test_multi_agent.py b/tests/test_multi_agent.py index 28a4ff3a..931e8118 100644 --- a/tests/test_multi_agent.py +++ b/tests/test_multi_agent.py @@ -1,20 +1,14 @@ -import asyncio - import pytest from letta.config import LettaConfig from letta.schemas.agent import CreateAgent from letta.schemas.block import CreateBlock from letta.schemas.group import ( - DynamicManager, DynamicManagerUpdate, GroupCreate, GroupUpdate, ManagerType, - RoundRobinManagerUpdate, - SupervisorManager, ) -from letta.schemas.message import MessageCreate from letta.server.server import SyncServer @@ -125,30 +119,6 @@ async def manager_agent(server, default_user): yield agent_scooby -async def test_empty_group(server, default_user): - group = await server.group_manager.create_group_async( - 
group=GroupCreate( - description="This is a group chat between best friends all like to hang out together. In their free time they like to solve mysteries.", - agent_ids=[], - ), - actor=default_user, - ) - with pytest.raises(ValueError, match="Empty group"): - await server.send_group_message_to_agent( - group_id=group.id, - actor=default_user, - input_messages=[ - MessageCreate( - role="user", - content="what is everyone up to for the holidays?", - ), - ], - stream_steps=False, - stream_tokens=False, - ) - await server.group_manager.delete_group_async(group_id=group.id, actor=default_user) - - async def test_modify_group_pattern(server, default_user, four_participant_agents, manager_agent): group = await server.group_manager.create_group_async( group=GroupCreate( @@ -195,243 +165,3 @@ async def test_list_agent_groups(server, default_user, four_participant_agents): await server.group_manager.delete_group_async(group_id=group_a.id, actor=default_user) await server.group_manager.delete_group_async(group_id=group_b.id, actor=default_user) - - -async def test_round_robin(server, default_user, four_participant_agents): - description = ( - "This is a group chat between best friends all like to hang out together. In their free time they like to solve mysteries." 
- ) - group = await server.group_manager.create_group_async( - group=GroupCreate( - description=description, - agent_ids=[agent.id for agent in four_participant_agents], - ), - actor=default_user, - ) - - # verify group creation - assert group.manager_type == ManagerType.round_robin - assert group.description == description - assert group.agent_ids == [agent.id for agent in four_participant_agents] - assert group.max_turns is None - assert group.manager_agent_id is None - assert group.termination_token is None - - try: - server.group_manager.reset_messages(group_id=group.id, actor=default_user) - response = await server.send_group_message_to_agent( - group_id=group.id, - actor=default_user, - input_messages=[ - MessageCreate( - role="user", - content="what is everyone up to for the holidays?", - ), - ], - stream_steps=False, - stream_tokens=False, - ) - assert response.usage.step_count == len(group.agent_ids) - assert len(response.messages) == response.usage.step_count * 2 - for i, message in enumerate(response.messages): - assert message.message_type == "reasoning_message" if i % 2 == 0 else "assistant_message" - assert message.name == four_participant_agents[i // 2].name - - for agent_id in group.agent_ids: - agent_messages = await server.get_agent_recall( - user_id=default_user.id, - agent_id=agent_id, - group_id=group.id, - reverse=True, - return_message_object=False, - ) - assert len(agent_messages) == len(group.agent_ids) + 2 # add one for user message, one for reasoning message - - # TODO: filter this to return a clean conversation history - messages = server.group_manager.list_group_messages( - group_id=group.id, - actor=default_user, - ) - assert len(messages) == (len(group.agent_ids) + 2) * len(group.agent_ids) - - max_turns = 3 - group = await server.group_manager.modify_group_async( - group_id=group.id, - group_update=GroupUpdate( - agent_ids=[agent.id for agent in four_participant_agents][::-1], - manager_config=RoundRobinManagerUpdate( - 
max_turns=max_turns, - ), - ), - actor=default_user, - ) - assert group.manager_type == ManagerType.round_robin - assert group.description == description - assert group.agent_ids == [agent.id for agent in four_participant_agents][::-1] - assert group.max_turns == max_turns - assert group.manager_agent_id is None - assert group.termination_token is None - - server.group_manager.reset_messages(group_id=group.id, actor=default_user) - - response = await server.send_group_message_to_agent( - group_id=group.id, - actor=default_user, - input_messages=[ - MessageCreate( - role="user", - content="when should we plan our next adventure?", - ), - ], - stream_steps=False, - stream_tokens=False, - ) - assert response.usage.step_count == max_turns - assert len(response.messages) == max_turns * 2 - - for i, message in enumerate(response.messages): - assert message.message_type == "reasoning_message" if i % 2 == 0 else "assistant_message" - assert message.name == four_participant_agents[::-1][i // 2].name - - for i in range(len(group.agent_ids)): - agent_messages = await server.get_agent_recall( - user_id=default_user.id, - agent_id=group.agent_ids[i], - group_id=group.id, - reverse=True, - return_message_object=False, - ) - expected_message_count = max_turns + 1 if i >= max_turns else max_turns + 2 - assert len(agent_messages) == expected_message_count - - finally: - await server.group_manager.delete_group_async(group_id=group.id, actor=default_user) - - -async def test_supervisor(server, default_user, four_participant_agents): - agent_scrappy = await server.create_agent_async( - request=CreateAgent( - name="shaggy", - memory_blocks=[ - CreateBlock( - label="persona", - value="You are a puppy operations agent for Letta and you help run multi-agent group chats. 
Your role is to supervise the group, sending messages and aggregating the responses.", - ), - CreateBlock( - label="human", - value="", - ), - ], - model="openai/gpt-4o-mini", - embedding="openai/text-embedding-3-small", - ), - actor=default_user, - ) - - group = await server.group_manager.create_group_async( - group=GroupCreate( - description="This is a group chat between best friends all like to hang out together. In their free time they like to solve mysteries.", - agent_ids=[agent.id for agent in four_participant_agents], - manager_config=SupervisorManager( - manager_agent_id=agent_scrappy.id, - ), - ), - actor=default_user, - ) - try: - response = await server.send_group_message_to_agent( - group_id=group.id, - actor=default_user, - input_messages=[ - MessageCreate( - role="user", - content="ask everyone what they like to do for fun and then come up with an activity for everyone to do together.", - ), - ], - stream_steps=False, - stream_tokens=False, - ) - assert response.usage.step_count == 2 - assert len(response.messages) == 5 - - # verify tool call - assert response.messages[0].message_type == "reasoning_message" - assert ( - response.messages[1].message_type == "tool_call_message" - and response.messages[1].tool_call.name == "send_message_to_all_agents_in_group" - ) - assert response.messages[2].message_type == "tool_return_message" and len(eval(response.messages[2].tool_return)) == len( - four_participant_agents - ) - assert response.messages[3].message_type == "reasoning_message" - assert response.messages[4].message_type == "assistant_message" - - finally: - await server.group_manager.delete_group_async(group_id=group.id, actor=default_user) - server.agent_manager.delete_agent(agent_id=agent_scrappy.id, actor=default_user) - - -@pytest.mark.flaky(max_runs=2) -async def test_dynamic_group_chat(server, default_user, manager_agent, four_participant_agents): - description = ( - "This is a group chat between best friends all like to hang out together. 
In their free time they like to solve mysteries." - ) - # error on duplicate agent in participant list - with pytest.raises(ValueError, match="Duplicate agent ids"): - await server.group_manager.create_group_async( - group=GroupCreate( - description=description, - agent_ids=[agent.id for agent in four_participant_agents] + [four_participant_agents[0].id], - manager_config=DynamicManager( - manager_agent_id=manager_agent.id, - ), - ), - actor=default_user, - ) - # error on duplicate agent names - duplicate_agent_shaggy = server.create_agent( - request=CreateAgent( - name="shaggy", - model="openai/gpt-4o-mini", - embedding="openai/text-embedding-3-small", - ), - actor=default_user, - ) - with pytest.raises(ValueError, match="Duplicate agent names"): - await server.group_manager.create_group_async( - group=GroupCreate( - description=description, - agent_ids=[agent.id for agent in four_participant_agents] + [duplicate_agent_shaggy.id], - manager_config=DynamicManager( - manager_agent_id=manager_agent.id, - ), - ), - actor=default_user, - ) - server.agent_manager.delete_agent(duplicate_agent_shaggy.id, actor=default_user) - - group = await server.group_manager.create_group_async( - group=GroupCreate( - description=description, - agent_ids=[agent.id for agent in four_participant_agents], - manager_config=DynamicManager( - manager_agent_id=manager_agent.id, - ), - ), - actor=default_user, - ) - try: - response = await server.send_group_message_to_agent( - group_id=group.id, - actor=default_user, - input_messages=[ - MessageCreate(role="user", content="what is everyone up to for the holidays?"), - ], - stream_steps=False, - stream_tokens=False, - ) - assert response.usage.step_count == len(four_participant_agents) * 2 - assert len(response.messages) == response.usage.step_count * 2 - - finally: - await server.group_manager.delete_group_async(group_id=group.id, actor=default_user) diff --git a/tests/test_openai_prompt_cache_request_fields.py 
b/tests/test_openai_prompt_cache_request_fields.py new file mode 100644 index 00000000..b0a06c47 --- /dev/null +++ b/tests/test_openai_prompt_cache_request_fields.py @@ -0,0 +1,150 @@ +from letta.llm_api.openai_client import OpenAIClient +from letta.schemas.enums import AgentType, MessageRole +from letta.schemas.letta_message_content import TextContent +from letta.schemas.llm_config import LLMConfig +from letta.schemas.message import Message + + +def _message(text: str = "hello") -> Message: + return Message( + role=MessageRole.user, + content=[TextContent(text=text)], + agent_id="agent-abc", + ) + + +def _openai_config(model: str, endpoint_type: str = "openai", provider_name: str | None = "openai") -> LLMConfig: + return LLMConfig( + model=model, + model_endpoint_type=endpoint_type, + model_endpoint="https://api.openai.com/v1", + context_window=256000, + provider_name=provider_name, + ) + + +def test_responses_request_sets_24h_retention_for_supported_model(): + client = OpenAIClient() + llm_config = _openai_config(model="gpt-5.1") + messages = [_message()] + + request_data = client.build_request_data( + agent_type=AgentType.letta_v1_agent, + messages=messages, + llm_config=llm_config, + tools=[], + ) + + assert "input" in request_data + assert "prompt_cache_key" not in request_data + assert request_data.get("prompt_cache_retention") == "24h" + + +def test_responses_request_omits_24h_for_unsupported_model(): + client = OpenAIClient() + llm_config = _openai_config(model="o3-mini") + messages = [_message()] + + request_data = client.build_request_data( + agent_type=AgentType.letta_v1_agent, + messages=messages, + llm_config=llm_config, + tools=[], + ) + + assert "prompt_cache_key" not in request_data + assert "prompt_cache_retention" not in request_data + + +def test_chat_completions_request_sets_24h_retention_for_supported_model(): + client = OpenAIClient() + llm_config = _openai_config(model="gpt-4.1") + messages = [_message()] + + request_data = 
client.build_request_data( + agent_type=AgentType.memgpt_v2_agent, + messages=messages, + llm_config=llm_config, + tools=[], + ) + + assert "messages" in request_data + assert "prompt_cache_key" not in request_data + assert request_data.get("prompt_cache_retention") == "24h" + + +def test_chat_completions_request_omits_24h_for_unsupported_model(): + client = OpenAIClient() + llm_config = _openai_config(model="gpt-4o-mini") + messages = [_message()] + + request_data = client.build_request_data( + agent_type=AgentType.memgpt_v2_agent, + messages=messages, + llm_config=llm_config, + tools=[], + ) + + assert "prompt_cache_key" not in request_data + assert "prompt_cache_retention" not in request_data + + +def test_openrouter_request_omits_all_prompt_cache_fields(): + client = OpenAIClient() + llm_config = LLMConfig( + model="gpt-5.1", + handle="openrouter/gpt-5.1", + model_endpoint_type="openai", + model_endpoint="https://openrouter.ai/api/v1", + context_window=256000, + provider_name="openrouter", + ) + messages = [_message()] + + responses_request_data = client.build_request_data( + agent_type=AgentType.letta_v1_agent, + messages=messages, + llm_config=llm_config, + tools=[], + ) + chat_request_data = client.build_request_data( + agent_type=AgentType.memgpt_v2_agent, + messages=messages, + llm_config=llm_config, + tools=[], + ) + + assert "prompt_cache_key" not in responses_request_data + assert "prompt_cache_retention" not in responses_request_data + assert "prompt_cache_key" not in chat_request_data + assert "prompt_cache_retention" not in chat_request_data + + +def test_gpt5_family_gets_24h_retention(): + """gpt-5, gpt-5-codex, gpt-5.1, gpt-5.2 all get 24h retention.""" + client = OpenAIClient() + + for model in ["gpt-5", "gpt-5-codex", "gpt-5.1", "gpt-5.1-codex", "gpt-5.2"]: + llm_config = _openai_config(model=model) + request_data = client.build_request_data( + agent_type=AgentType.letta_v1_agent, + messages=[_message()], + llm_config=llm_config, + tools=[], + ) 
+ assert request_data.get("prompt_cache_retention") == "24h", f"{model} should get 24h retention" + + +def test_gpt5_mini_excluded_from_24h_retention(): + """gpt-5-mini is not listed in OpenAI docs for extended retention.""" + client = OpenAIClient() + llm_config = _openai_config(model="gpt-5-mini") + + request_data = client.build_request_data( + agent_type=AgentType.letta_v1_agent, + messages=[_message()], + llm_config=llm_config, + tools=[], + ) + + assert "prompt_cache_retention" not in request_data diff --git a/tests/test_prompt_caching.py b/tests/test_prompt_caching.py index 9f5ac8ab..432a2b41 100644 --- a/tests/test_prompt_caching.py +++ b/tests/test_prompt_caching.py @@ -221,12 +221,12 @@ CACHING_TEST_CONFIGS = [ # The docs say "Implicit caching is enabled by default for all Gemini 2.5 models" # This suggests 3 Pro Preview may require explicit caching instead pytest.param( - "google_ai/gemini-3-pro-preview", + "google_ai/gemini-3.1-pro-preview", {}, 2048, # Min tokens for 3 Pro Preview "cached_tokens", # Field name (normalized from cached_content_token_count) None, # No separate write field - id="gemini-3-pro-preview-implicit", + id="gemini-3.1-pro-preview-implicit", marks=pytest.mark.xfail(reason="Gemini 3 Pro Preview doesn't have implicit caching (only 2.5 models do)"), ), ] @@ -542,7 +542,7 @@ async def test_prompt_caching_cache_invalidation_on_memory_update( try: # Message 1: Establish cache - response1 = await async_client.agents.messages.create( + await async_client.agents.messages.create( agent_id=agent.id, messages=[MessageCreateParam(role="user", content="Hello!")], ) @@ -682,7 +682,7 @@ async def test_anthropic_system_prompt_stability(async_client: AsyncLetta): logger.info(diff_output[:2000]) # Truncate if too long logger.info("=" * 80) - if "Memory blocks were last modified" in diff_output: + if "System prompt last recompiled" in diff_output: logger.error("⚠️ TIMESTAMP IS CHANGING IN ACTUAL REQUESTS!") logger.error(" → This is the root cause of 
cache misses") @@ -702,8 +702,6 @@ async def test_anthropic_inspect_raw_request(async_client: AsyncLetta): agent = await create_agent_with_large_memory(async_client, model, {}, "anthropic-debug") try: - import json - # Message 1 response1 = await async_client.agents.messages.create( agent_id=agent.id, @@ -926,7 +924,7 @@ async def test_gemini_3_pro_preview_implicit_caching(async_client: AsyncLetta): Since implicit caching is stochastic (depends on routing, timing, etc.), we send multiple messages in quick succession and check if ANY of them hit the cache. """ - model = "google_ai/gemini-3-pro-preview" + model = "google_ai/gemini-3.1-pro-preview" agent = await create_agent_with_large_memory(async_client, model, {}, "gemini-3-pro") try: @@ -1054,7 +1052,7 @@ async def test_gemini_request_prefix_stability(async_client: AsyncLetta): lineterm="", ) diff_output = "\n".join(diff) - if "Memory blocks were last modified" in diff_output or "timestamp" in diff_output.lower(): + if "System prompt last recompiled" in diff_output or "timestamp" in diff_output.lower(): logger.error("⚠️ TIMESTAMP IN SYSTEM INSTRUCTION IS CHANGING!") logger.error(" → This breaks Gemini implicit caching (prefix must match)") else: diff --git a/tests/test_provider_trace.py b/tests/test_provider_trace.py index d2fc4f47..537d6b83 100644 --- a/tests/test_provider_trace.py +++ b/tests/test_provider_trace.py @@ -14,7 +14,6 @@ import os import threading import time import uuid -from unittest.mock import patch import pytest from dotenv import load_dotenv diff --git a/tests/test_provider_trace_agents.py b/tests/test_provider_trace_agents.py index 830d776c..9adf52b6 100644 --- a/tests/test_provider_trace_agents.py +++ b/tests/test_provider_trace_agents.py @@ -198,6 +198,7 @@ class TestAdapterTelemetryAttributes: """Verify base LettaLLMAdapter has telemetry attributes.""" from letta.adapters.letta_llm_adapter import LettaLLMAdapter from letta.llm_api.llm_client import LLMClient + from letta.schemas.enums 
import LLMCallType mock_client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True) @@ -212,6 +213,7 @@ class TestAdapterTelemetryAttributes: adapter = TestAdapter( llm_client=mock_client, llm_config=mock_llm_config, + call_type=LLMCallType.agent_step, agent_id=agent_id, agent_tags=agent_tags, run_id=run_id, @@ -220,11 +222,13 @@ class TestAdapterTelemetryAttributes: assert adapter.agent_id == agent_id assert adapter.agent_tags == agent_tags assert adapter.run_id == run_id + assert adapter.call_type == LLMCallType.agent_step def test_request_adapter_inherits_telemetry_attributes(self, mock_llm_config): """Verify LettaLLMRequestAdapter inherits telemetry attributes.""" from letta.adapters.letta_llm_request_adapter import LettaLLMRequestAdapter from letta.llm_api.llm_client import LLMClient + from letta.schemas.enums import LLMCallType mock_client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True) @@ -235,6 +239,7 @@ class TestAdapterTelemetryAttributes: adapter = LettaLLMRequestAdapter( llm_client=mock_client, llm_config=mock_llm_config, + call_type=LLMCallType.agent_step, agent_id=agent_id, agent_tags=agent_tags, run_id=run_id, @@ -248,6 +253,7 @@ class TestAdapterTelemetryAttributes: """Verify LettaLLMStreamAdapter inherits telemetry attributes.""" from letta.adapters.letta_llm_stream_adapter import LettaLLMStreamAdapter from letta.llm_api.llm_client import LLMClient + from letta.schemas.enums import LLMCallType mock_client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True) @@ -258,6 +264,7 @@ class TestAdapterTelemetryAttributes: adapter = LettaLLMStreamAdapter( llm_client=mock_client, llm_config=mock_llm_config, + call_type=LLMCallType.agent_step, agent_id=agent_id, agent_tags=agent_tags, run_id=run_id, @@ -272,13 +279,14 @@ class TestAdapterTelemetryAttributes: from letta.adapters.letta_llm_request_adapter import LettaLLMRequestAdapter from letta.adapters.letta_llm_stream_adapter import 
LettaLLMStreamAdapter from letta.llm_api.llm_client import LLMClient + from letta.schemas.enums import LLMCallType mock_client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True) - request_adapter = LettaLLMRequestAdapter(llm_client=mock_client, llm_config=mock_llm_config) - stream_adapter = LettaLLMStreamAdapter(llm_client=mock_client, llm_config=mock_llm_config) + request_adapter = LettaLLMRequestAdapter(llm_client=mock_client, llm_config=mock_llm_config, call_type=LLMCallType.agent_step) + stream_adapter = LettaLLMStreamAdapter(llm_client=mock_client, llm_config=mock_llm_config, call_type=LLMCallType.agent_step) - for attr in ["agent_id", "agent_tags", "run_id"]: + for attr in ["agent_id", "agent_tags", "run_id", "call_type"]: assert hasattr(request_adapter, attr), f"LettaLLMRequestAdapter missing {attr}" assert hasattr(stream_adapter, attr), f"LettaLLMStreamAdapter missing {attr}" diff --git a/tests/test_provider_trace_backends.py b/tests/test_provider_trace_backends.py index f1051d1c..34238102 100644 --- a/tests/test_provider_trace_backends.py +++ b/tests/test_provider_trace_backends.py @@ -1,18 +1,17 @@ """Unit tests for provider trace backends.""" -import asyncio import json import os import socket import tempfile import threading -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import patch import pytest from letta.schemas.provider_trace import ProviderTrace from letta.schemas.user import User -from letta.services.provider_trace_backends.base import ProviderTraceBackend, ProviderTraceBackendClient +from letta.services.provider_trace_backends.base import ProviderTraceBackend from letta.services.provider_trace_backends.socket import SocketProviderTraceBackend @@ -288,8 +287,8 @@ class TestSocketProviderTraceBackend: assert captured_records[0]["error"] == "Rate limit exceeded" assert captured_records[0]["response"] is None - def test_record_includes_v2_protocol_fields(self): - """Test that v2 protocol fields are 
included in the socket record.""" + def test_record_includes_v3_protocol_fields(self): + """Test that v3 protocol fields are included in the socket record.""" trace = ProviderTrace( request_json={"model": "gpt-4"}, response_json={"id": "test"}, @@ -312,7 +311,7 @@ class TestSocketProviderTraceBackend: assert len(captured_records) == 1 record = captured_records[0] - assert record["protocol_version"] == 2 + assert record["protocol_version"] == 3 assert record["org_id"] == "org-456" assert record["user_id"] == "user-456" assert record["compaction_settings"] == {"mode": "sliding_window"} @@ -341,7 +340,6 @@ class TestBackendFactory: def test_get_multiple_backends(self): """Test getting multiple backends via environment.""" - import os from letta.services.provider_trace_backends.factory import ( get_provider_trace_backends, diff --git a/tests/test_provider_trace_summarization.py b/tests/test_provider_trace_summarization.py index 3f114736..12a4a65a 100644 --- a/tests/test_provider_trace_summarization.py +++ b/tests/test_provider_trace_summarization.py @@ -11,7 +11,6 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest from letta.schemas.agent import AgentState -from letta.schemas.block import Block from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import MessageRole from letta.schemas.llm_config import LLMConfig @@ -210,6 +209,7 @@ class TestSummarizeSlidingWindowTelemetryContext: await summarizer_sliding_window.summarize_via_sliding_window( actor=mock_actor, llm_config=mock_llm_config, + agent_llm_config=mock_llm_config, # case where agent and summarizer have same config summarizer_config=mock_compaction_settings, in_context_messages=mock_messages, agent_id=agent_id, diff --git a/tests/test_providers.py b/tests/test_providers.py index 508e23e6..a7ab5830 100644 --- a/tests/test_providers.py +++ b/tests/test_providers.py @@ -19,10 +19,16 @@ from letta.schemas.providers import ( VLLMProvider, ZAIProvider, ) +from 
letta.schemas.providers.chatgpt_oauth import CHATGPT_MODELS from letta.schemas.secret import Secret from letta.settings import model_settings +def test_chatgpt_oauth_model_allowlist_includes_gpt_5_3_codex(): + model_names = {model["name"] for model in CHATGPT_MODELS} + assert "gpt-5.3-codex" in model_names + + def test_openai(): provider = OpenAIProvider( name="openai", @@ -138,14 +144,15 @@ async def test_minimax(): provider = MiniMaxProvider(name="minimax") models = await provider.list_llm_models_async() - # Should have exactly 3 models: M2.1, M2.1-lightning, M2 - assert len(models) == 3 + # Should have exactly 4 models: M2.1, M2.1-lightning, M2, M2.5 + assert len(models) == 4 # Verify model properties model_names = {m.model for m in models} assert "MiniMax-M2.1" in model_names assert "MiniMax-M2.1-lightning" in model_names assert "MiniMax-M2" in model_names + assert "MiniMax-M2.5" in model_names # Verify handle format for model in models: @@ -227,6 +234,21 @@ async def test_vllm(): assert len(embedding_models) == 0 # embedding models currently not supported by vLLM +@pytest.mark.skipif(model_settings.lmstudio_base_url is None, reason="Only run if LMSTUDIO_BASE_URL is set.") +@pytest.mark.asyncio +async def test_lmstudio(): + from letta.schemas.providers.lmstudio import LMStudioOpenAIProvider + + provider = LMStudioOpenAIProvider(name="lmstudio_openai", base_url=model_settings.lmstudio_base_url) + models = await provider.list_llm_models_async() + assert len(models) > 0 + assert models[0].handle == f"{provider.name}/{models[0].model}" + + embedding_models = await provider.list_embedding_models_async() + assert len(embedding_models) > 0 + assert embedding_models[0].handle == f"{provider.name}/{embedding_models[0].embedding_model}" + + @pytest.mark.skipif(model_settings.sglang_api_base is None, reason="Only run if SGLANG_API_BASE is set.") @pytest.mark.asyncio async def test_sglang(): diff --git a/tests/test_redis_client.py b/tests/test_redis_client.py index
7017de9e..11bb9bc6 100644 --- a/tests/test_redis_client.py +++ b/tests/test_redis_client.py @@ -23,4 +23,4 @@ async def test_redis_client(): assert await redis_client.smismember(k, "invalid") == 0 assert await redis_client.smismember(k, v[0]) == 1 assert await redis_client.smismember(k, v[:2]) == [1, 1] - assert await redis_client.smismember(k, v[2:] + ["invalid"]) == [1, 0] + assert await redis_client.smismember(k, [*v[2:], "invalid"]) == [1, 0] diff --git a/tests/test_sdk_client.py b/tests/test_sdk_client.py index 91e340b8..ec749c54 100644 --- a/tests/test_sdk_client.py +++ b/tests/test_sdk_client.py @@ -6,7 +6,7 @@ import textwrap import threading import time import uuid -from typing import List, Type +from typing import ClassVar, List, Type import pytest from dotenv import load_dotenv @@ -24,14 +24,12 @@ from letta_client.types import ( TerminalToolRule, ToolReturnMessage, ) -from letta_client.types.agents.text_content_param import TextContentParam from letta_client.types.tool import BaseTool from pydantic import BaseModel, Field from letta.config import LettaConfig from letta.jobs.llm_batch_job_polling import poll_running_llm_batches from letta.server.server import SyncServer -from tests.helpers.utils import upload_file_and_wait from tests.utils import wait_for_server # Constants @@ -381,7 +379,7 @@ def test_add_and_manage_tags_for_agent(client: LettaSDKClient): assert len(agent.tags) == 0 # Step 1: Add multiple tags to the agent - updated_agent = client.agents.update(agent_id=agent.id, tags=tags_to_add) + client.agents.update(agent_id=agent.id, tags=tags_to_add) # Add small delay to ensure tags are persisted time.sleep(0.1) @@ -399,7 +397,7 @@ def test_add_and_manage_tags_for_agent(client: LettaSDKClient): # Step 4: Delete a specific tag from the agent and verify its removal tag_to_delete = tags_to_add.pop() - updated_agent = client.agents.update(agent_id=agent.id, tags=tags_to_add) + client.agents.update(agent_id=agent.id, tags=tags_to_add) # Verify the tag 
is removed from the agent's tags - explicitly request tags remaining_tags = client.agents.retrieve(agent_id=agent.id, include=["agent.tags"]).tags @@ -428,7 +426,7 @@ def test_reset_messages(client: LettaSDKClient): try: # Send a message - response = client.agents.messages.create( + client.agents.messages.create( agent_id=agent.id, messages=[MessageCreateParam(role="user", content="Hello")], ) @@ -544,7 +542,6 @@ def test_list_files_for_agent(client: LettaSDKClient): raise RuntimeError(f"File {file_metadata.id} not found") if file_metadata.processing_status == "error": raise RuntimeError(f"File processing failed: {getattr(file_metadata, 'error_message', 'Unknown error')}") - test_file = file_metadata agent = client.agents.create( memory_blocks=[CreateBlockParam(label="persona", value="test")], @@ -606,7 +603,7 @@ def test_modify_message(client: LettaSDKClient): try: # Send a message - response = client.agents.messages.create( + client.agents.messages.create( agent_id=agent.id, messages=[MessageCreateParam(role="user", content="Original message")], ) @@ -989,11 +986,6 @@ def test_function_always_error(client: LettaSDKClient, agent: AgentState): def test_agent_creation(client: LettaSDKClient): """Test that block IDs are properly attached when creating an agent.""" - sleeptime_agent_system = """ - You are a helpful agent. You will be provided with a list of memory blocks and a user preferences block. - You should use the memory blocks to remember information about the user and their preferences. - You should also use the user preferences block to remember information about the user's preferences. 
- """ # Create a test block that will represent user preferences user_preferences_block = client.blocks.create( @@ -1120,7 +1112,7 @@ def test_include_return_message_types(client: LettaSDKClient, agent: AgentState, memory_blocks=[ CreateBlockParam(label="user", value="Name: Charles"), ], - model="anthropic/claude-haiku-4-5-20251001", + model="anthropic/claude-haiku-4-5", embedding="openai/text-embedding-3-small", ) @@ -1257,7 +1249,7 @@ def test_pydantic_inventory_management_tool(e2b_sandbox_mode, client: LettaSDKCl name: str = "manage_inventory" args_schema: Type[BaseModel] = InventoryEntryData description: str = "Update inventory catalogue with a new data entry" - tags: List[str] = ["inventory", "shop"] + tags: ClassVar[List[str]] = ["inventory", "shop"] def run(self, data: InventoryEntry, quantity_change: int) -> bool: print(f"Updated inventory for {data.item.name} with a quantity change of {quantity_change}") @@ -2153,13 +2145,13 @@ async def test_create_batch(client: LettaSDKClient, server: SyncServer): agent1 = client.agents.create( name="agent1_batch", memory_blocks=[{"label": "persona", "value": "you are agent 1"}], - model="anthropic/claude-3-7-sonnet-20250219", + model="anthropic/claude-sonnet-4-20250514", embedding="openai/text-embedding-3-small", ) agent2 = client.agents.create( name="agent2_batch", memory_blocks=[{"label": "persona", "value": "you are agent 2"}], - model="anthropic/claude-3-7-sonnet-20250219", + model="anthropic/claude-sonnet-4-20250514", embedding="openai/text-embedding-3-small", ) @@ -2383,7 +2375,7 @@ def test_create_agent_with_tools(client: LettaSDKClient) -> None: name: str = "manage_inventory" args_schema: Type[BaseModel] = InventoryEntryData description: str = "Update inventory catalogue with a new data entry" - tags: List[str] = ["inventory", "shop"] + tags: ClassVar[List[str]] = ["inventory", "shop"] def run(self, data: InventoryEntry, quantity_change: int) -> bool: """ @@ -2446,7 +2438,7 @@ def test_calling_tools(client: 
LettaSDKClient, agent: AgentState) -> None: assert len(blocks) == 1, f"Expected 1 block, got {len(blocks)}" # test calling a stateful tool - result = client.agents.tools.run(agent_id=agent.id, tool_name="memory_insert", args={"label": "human", "new_str": "test"}) + result = client.agents.tools.run(agent_id=agent.id, tool_name="memory_insert", args={"label": "human", "new_string": "test"}) assert result.status == "success", f"Expected success, got {result.status}" # get the block block = client.agents.blocks.retrieve(agent_id=agent.id, block_label="human") diff --git a/tests/test_secret.py b/tests/test_secret.py index 0dbc80d9..cd4d6390 100644 --- a/tests/test_secret.py +++ b/tests/test_secret.py @@ -1,5 +1,4 @@ -import json -from unittest.mock import MagicMock, patch +from unittest.mock import patch import pytest diff --git a/tests/test_server.py b/tests/test_server.py index 490463e2..48dfc51b 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -1,39 +1,20 @@ -import json import os -import shutil -import uuid -import warnings -from typing import List, Tuple -from unittest.mock import patch import pytest -from sqlalchemy import delete import letta.utils as utils from letta.agents.agent_loop import AgentLoop -from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS, LETTA_DIR, LETTA_TOOL_EXECUTION_DIR -from letta.orm import Provider, Step -from letta.schemas.block import CreateBlock from letta.schemas.enums import MessageRole, ProviderType -from letta.schemas.letta_message import LettaMessage, ReasoningMessage, SystemMessage, ToolCallMessage, ToolReturnMessage, UserMessage -from letta.schemas.llm_config import LLMConfig from letta.schemas.providers import Provider as PydanticProvider, ProviderCreate -from letta.schemas.sandbox_config import SandboxType from letta.schemas.user import User utils.DEBUG = True from letta.config import LettaConfig from letta.orm.errors import NoResultFound -from letta.schemas.agent import CreateAgent, UpdateAgent -from 
letta.schemas.embedding_config import EmbeddingConfig -from letta.schemas.job import Job as PydanticJob -from letta.schemas.message import Message, MessageCreate +from letta.schemas.agent import CreateAgent +from letta.schemas.message import MessageCreate from letta.schemas.run import Run as PydanticRun -from letta.schemas.source import Source as PydanticSource from letta.server.server import SyncServer -from letta.system import unpack_message - -from .utils import DummyDataConnector @pytest.fixture @@ -102,7 +83,7 @@ async def custom_anthropic_provider(server: SyncServer, user_id: str): @pytest.fixture async def agent(server: SyncServer, user: User): - actor = await server.user_manager.get_actor_or_default_async() + await server.user_manager.get_actor_or_default_async() agent = await server.create_agent_async( CreateAgent( agent_type="memgpt_v2_agent", @@ -148,7 +129,6 @@ async def test_messages_with_provider_override(server: SyncServer, custom_anthro run_id=run.id, ) usage = response.usage - messages = response.messages get_messages_response = await server.message_manager.list_messages(agent_id=agent.id, actor=actor, after=existing_messages[-1].id) @@ -247,7 +227,6 @@ async def test_messages_with_provider_override_legacy_agent(server: SyncServer, run_id=run.id, ) usage = response.usage - messages = response.messages get_messages_response = await server.message_manager.list_messages(agent_id=agent.id, actor=actor, after=existing_messages[-1].id) diff --git a/tests/test_server_providers.py b/tests/test_server_providers.py index e628497d..9c3cc7ff 100644 --- a/tests/test_server_providers.py +++ b/tests/test_server_providers.py @@ -1,5 +1,6 @@ """Tests for provider initialization via ProviderManager.sync_base_providers and provider model persistence.""" +import json import uuid from unittest.mock import AsyncMock, MagicMock, patch @@ -109,7 +110,6 @@ async def test_sync_base_providers_handles_race_condition(default_user, provider # Mock a race condition: list returns 
empty, but create fails with UniqueConstraintViolation original_list = provider_manager.list_providers_async - original_create = provider_manager.create_provider_async call_count = {"count": 0} @@ -2029,14 +2029,14 @@ async def test_get_enabled_providers_async_queries_database(default_user, provid api_key="sk-test-key", base_url="https://api.openai.com/v1", ) - base_provider = await provider_manager.create_provider_async(base_provider_create, actor=default_user, is_byok=False) + await provider_manager.create_provider_async(base_provider_create, actor=default_user, is_byok=False) byok_provider_create = ProviderCreate( name=f"test-byok-provider-{test_id}", provider_type=ProviderType.anthropic, api_key="sk-test-byok-key", ) - byok_provider = await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True) + await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True) # Create server instance - importantly, don't set _enabled_providers # This ensures we're testing database queries, not in-memory list @@ -2181,7 +2181,7 @@ async def test_byok_provider_api_key_stored_in_db(default_user, provider_manager provider_type=ProviderType.openai, api_key="sk-byok-should-be-stored", ) - byok_provider = await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True) + await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True) # Retrieve the provider from database providers = await provider_manager.list_providers_async(name=f"test-byok-with-key-{test_id}", actor=default_user) @@ -2572,7 +2572,7 @@ async def test_byok_provider_last_synced_triggers_sync_when_null(default_user, p with patch.object(Provider, "cast_to_subtype", return_value=mock_typed_provider): # List BYOK models - should trigger sync because last_synced is null - byok_models = await server.list_llm_models_async( + await server.list_llm_models_async( 
actor=default_user, provider_category=[ProviderCategory.byok], ) @@ -2599,7 +2599,6 @@ async def test_byok_provider_last_synced_triggers_sync_when_null(default_user, p @pytest.mark.asyncio async def test_byok_provider_last_synced_skips_sync_when_set(default_user, provider_manager): """Test that BYOK providers with last_synced set skip sync and read from DB.""" - from datetime import datetime, timezone from letta.schemas.providers import Provider from letta.server.server import SyncServer @@ -2660,10 +2659,69 @@ async def test_byok_provider_last_synced_skips_sync_when_set(default_user, provi assert f"test-byok-cached-{test_id}/gpt-4o" in byok_handles +@pytest.mark.asyncio +async def test_chatgpt_oauth_byok_resyncs_when_allowlist_expands(default_user, provider_manager): + """ChatGPT OAuth providers should backfill newly added hardcoded models.""" + test_id = generate_test_id() + provider_name = f"test-chatgpt-oauth-{test_id}" + + oauth_credentials = json.dumps( + { + "access_token": "test-access-token", + "refresh_token": "test-refresh-token", + "account_id": "test-account-id", + "expires_at": 4_102_444_800, # year 2100 (seconds) + } + ) + + byok_provider = await provider_manager.create_provider_async( + ProviderCreate( + name=provider_name, + provider_type=ProviderType.chatgpt_oauth, + api_key=oauth_credentials, + ), + actor=default_user, + is_byok=True, + ) + + # Simulate a stale provider model cache that predates gpt-5.3-codex. 
+ stale_models = [ + LLMConfig( + model="gpt-5.2-codex", + model_endpoint_type="chatgpt_oauth", + model_endpoint="https://chatgpt.com/backend-api/codex/responses", + context_window=272000, + handle=f"{provider_name}/gpt-5.2-codex", + provider_name=provider_name, + provider_category=ProviderCategory.byok, + ) + ] + await provider_manager.sync_provider_models_async( + provider=byok_provider, + llm_models=stale_models, + embedding_models=[], + organization_id=default_user.organization_id, + ) + await provider_manager.update_provider_last_synced_async(byok_provider.id, actor=default_user) + + server = SyncServer(init_with_default_org_and_user=False) + server.default_user = default_user + server.provider_manager = provider_manager + server._enabled_providers = [] + + byok_models = await server.list_llm_models_async( + actor=default_user, + provider_category=[ProviderCategory.byok], + provider_name=provider_name, + ) + + byok_handles = {model.handle for model in byok_models} + assert f"{provider_name}/gpt-5.3-codex" in byok_handles + + @pytest.mark.asyncio async def test_base_provider_updates_last_synced_on_sync(default_user, provider_manager): """Test that base provider sync updates the last_synced timestamp.""" - from letta.server.server import SyncServer test_id = generate_test_id() @@ -3159,7 +3217,6 @@ async def test_byok_provider_uses_schema_default_base_url(default_user, provider """ from letta.orm.provider import Provider as ProviderORM from letta.schemas.providers import Provider as PydanticProvider - from letta.schemas.providers.zai import ZAIProvider from letta.server.db import db_registry test_id = generate_test_id() diff --git a/tests/test_sonnet_nonnative_reasoning_buffering.py b/tests/test_sonnet_nonnative_reasoning_buffering.py index 7ca6890a..7373f0c6 100755 --- a/tests/test_sonnet_nonnative_reasoning_buffering.py +++ b/tests/test_sonnet_nonnative_reasoning_buffering.py @@ -84,7 +84,7 @@ def agent_factory(client: Letta): for agent_state in 
created_agents: try: client.agents.delete(agent_state.id) - except: + except Exception: pass # Agent might have already been deleted diff --git a/tests/test_sources.py b/tests/test_sources.py index 79555784..8ceda0a5 100644 --- a/tests/test_sources.py +++ b/tests/test_sources.py @@ -27,8 +27,20 @@ from tests.utils import wait_for_server SERVER_PORT = 8283 -def get_raw_system_message(client: LettaSDKClient, agent_id: str) -> str: +def recompile_agent_system_prompt(client: LettaSDKClient, agent_id: str) -> None: + """Force a system prompt recompilation for deterministic raw-preview assertions.""" + client.post( + f"/v1/agents/{agent_id}/recompile", + cast_to=str, + body={}, + ) + + +def get_raw_system_message(client: LettaSDKClient, agent_id: str, recompile: bool = False) -> str: """Helper function to get the raw system message from an agent's preview payload.""" + if recompile: + recompile_agent_system_prompt(client, agent_id) + raw_payload = client.post( f"/v1/agents/{agent_id}/messages/preview-raw-payload", cast_to=dict[str, Any], @@ -77,9 +89,9 @@ def client() -> LettaSDKClient: @pytest.fixture def agent_state(disable_pinecone, client: LettaSDKClient): - open_file_tool = list(client.tools.list(name="open_files"))[0] - search_files_tool = list(client.tools.list(name="semantic_search_files"))[0] - grep_tool = list(client.tools.list(name="grep_files"))[0] + open_file_tool = next(iter(client.tools.list(name="open_files"))) + search_files_tool = next(iter(client.tools.list(name="semantic_search_files"))) + grep_tool = next(iter(client.tools.list(name="grep_files"))) agent_state = client.agents.create( name="test_sources_agent", @@ -215,7 +227,7 @@ def test_file_upload_creates_source_blocks_correctly( assert any(re.fullmatch(expected_label_regex, b.label) for b in blocks) # verify raw system message contains source information - raw_system_message = get_raw_system_message(client, agent_state.id) + raw_system_message = get_raw_system_message(client, agent_state.id, 
recompile=True) assert "test_source" in raw_system_message assert "" in raw_system_message # verify file-specific details in raw system message @@ -234,7 +246,7 @@ def test_file_upload_creates_source_blocks_correctly( assert not any(re.fullmatch(expected_label_regex, b.label) for b in blocks) # verify raw system message no longer contains source information - raw_system_message_after_removal = get_raw_system_message(client, agent_state.id) + raw_system_message_after_removal = get_raw_system_message(client, agent_state.id, recompile=True) # this should be in, because we didn't delete the source assert "test_source" in raw_system_message_after_removal assert "" in raw_system_message_after_removal @@ -266,7 +278,7 @@ def test_attach_existing_files_creates_source_blocks_correctly( # Attach after uploading the file client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id) - raw_system_message = get_raw_system_message(client, agent_state.id) + raw_system_message = get_raw_system_message(client, agent_state.id, recompile=True) # Assert that the expected chunk is in the raw system message expected_chunk = """ @@ -307,7 +319,7 @@ def test_attach_existing_files_creates_source_blocks_correctly( assert not any("test" in b.value for b in blocks) # Verify no traces of the prompt exist in the raw system message after detaching - raw_system_message_after_detach = get_raw_system_message(client, agent_state.id) + raw_system_message_after_detach = get_raw_system_message(client, agent_state.id, recompile=True) assert expected_chunk not in raw_system_message_after_detach assert "test_source" not in raw_system_message_after_detach assert "" not in raw_system_message_after_detach @@ -321,7 +333,7 @@ def test_delete_source_removes_source_blocks_correctly( assert len(list(client.folders.list())) == 1 client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id) - raw_system_message = get_raw_system_message(client, agent_state.id) + raw_system_message = 
get_raw_system_message(client, agent_state.id, recompile=True) assert "test_source" in raw_system_message assert "" in raw_system_message @@ -330,7 +342,7 @@ def test_delete_source_removes_source_blocks_correctly( # Upload the files upload_file_and_wait(client, source.id, file_path) - raw_system_message = get_raw_system_message(client, agent_state.id) + raw_system_message = get_raw_system_message(client, agent_state.id, recompile=True) # Assert that the expected chunk is in the raw system message expected_chunk = """ @@ -361,7 +373,7 @@ def test_delete_source_removes_source_blocks_correctly( # Remove file from source client.folders.delete(folder_id=source.id) - raw_system_message_after_detach = get_raw_system_message(client, agent_state.id) + raw_system_message_after_detach = get_raw_system_message(client, agent_state.id, recompile=True) assert expected_chunk not in raw_system_message_after_detach assert "test_source" not in raw_system_message_after_detach assert "" not in raw_system_message_after_detach @@ -520,7 +532,8 @@ def test_agent_uses_search_files_correctly(disable_pinecone, disable_turbopuffer # Check it returned successfully tool_returns = [msg for msg in search_files_response.messages if msg.message_type == "tool_return_message"] assert len(tool_returns) > 0, "No tool returns found" - assert all(tr.status == "success" for tr in tool_returns), f"Tool call failed {tr}" + failed_returns = [tr for tr in tool_returns if tr.status != "success"] + assert len(failed_returns) == 0, f"Tool call failed: {failed_returns}" def test_agent_uses_grep_correctly_basic(disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState): @@ -732,13 +745,13 @@ def test_duplicate_file_renaming(disable_pinecone, disable_turbopuffer, client: file_path = "tests/data/test.txt" with open(file_path, "rb") as f: - first_file = client.folders.files.upload(folder_id=source.id, file=f) + client.folders.files.upload(folder_id=source.id, file=f) with open(file_path, 
"rb") as f: - second_file = client.folders.files.upload(folder_id=source.id, file=f) + client.folders.files.upload(folder_id=source.id, file=f) with open(file_path, "rb") as f: - third_file = client.folders.files.upload(folder_id=source.id, file=f) + client.folders.files.upload(folder_id=source.id, file=f) # Get all uploaded files files = list(client.folders.files.list(folder_id=source.id, limit=10)) @@ -808,7 +821,7 @@ def test_duplicate_file_handling_replace(disable_pinecone, disable_turbopuffer, f.write(replacement_content) # Upload replacement file with REPLACE duplicate handling - replacement_file = upload_file_and_wait(client, source.id, temp_file_path, duplicate_handling="replace") + upload_file_and_wait(client, source.id, temp_file_path, duplicate_handling="replace") # Verify we still have only 1 file (replacement, not addition) files_after_replace = list(client.folders.files.list(folder_id=source.id, limit=10)) @@ -1112,7 +1125,7 @@ def test_agent_open_file(disable_pinecone, disable_turbopuffer, client: LettaSDK closed_files = client.agents.files.open(agent_id=agent_state.id, file_id=file_metadata["id"]) assert len(closed_files) == 0 - system = get_raw_system_message(client, agent_state.id) + system = get_raw_system_message(client, agent_state.id, recompile=True) assert '' in system assert "[Viewing file start (out of 1 lines)]" in system @@ -1137,7 +1150,7 @@ def test_agent_close_file(disable_pinecone, disable_turbopuffer, client: LettaSD # Test close_file function client.agents.files.close(agent_id=agent_state.id, file_id=file_metadata["id"]) - system = get_raw_system_message(client, agent_state.id) + system = get_raw_system_message(client, agent_state.id, recompile=True) assert '' in system @@ -1160,7 +1173,7 @@ def test_agent_close_all_open_files(disable_pinecone, disable_turbopuffer, clien # Open each file client.agents.files.open(agent_id=agent_state.id, file_id=file_metadata["id"]) - system = get_raw_system_message(client, agent_state.id) + system = 
get_raw_system_message(client, agent_state.id, recompile=True) assert '= end should be allowed (but gives empty result) @@ -561,7 +561,7 @@ def test_line_chunker_only_start_parameter(): assert "3: line3" in result[2] # Test start at end of file - should raise error - with pytest.raises(ValueError, match="File test.py has only 3 lines, but requested offset 4 is out of range"): + with pytest.raises(ValueError, match=r"File test.py has only 3 lines, but requested offset 4 is out of range"): chunker.chunk_text(file, start=3, validate_range=True) @@ -653,10 +653,10 @@ def test_validate_function_response_strict_mode_none(): def test_validate_function_response_strict_mode_violation(): """Test strict mode raises ValueError for non-string/None types""" - with pytest.raises(ValueError, match="Strict mode violation. Function returned type: int"): + with pytest.raises(ValueError, match=r"Strict mode violation. Function returned type: int"): validate_function_response(42, return_char_limit=100, strict=True) - with pytest.raises(ValueError, match="Strict mode violation. Function returned type: dict"): + with pytest.raises(ValueError, match=r"Strict mode violation. Function returned type: dict"): validate_function_response({"key": "value"}, return_char_limit=100, strict=True) diff --git a/uv.lock b/uv.lock index 8028a7a9..67610595 100644 --- a/uv.lock +++ b/uv.lock @@ -2510,7 +2510,7 @@ wheels = [ [[package]] name = "letta" -version = "0.16.4" +version = "0.16.5" source = { editable = "." 
} dependencies = [ { name = "aiofiles" }, @@ -2580,6 +2580,7 @@ dependencies = [ { name = "temporalio" }, { name = "tqdm" }, { name = "trafilatura" }, + { name = "ty" }, { name = "typer" }, ] @@ -2755,7 +2756,7 @@ requires-dist = [ { name = "readability-lxml" }, { name = "redis", marker = "extra == 'redis'", specifier = ">=6.2.0" }, { name = "rich", specifier = ">=13.9.4" }, - { name = "ruff", extras = ["dev"], specifier = ">=0.12.10" }, + { name = "ruff", specifier = ">=0.12.10" }, { name = "sentry-sdk", extras = ["fastapi"], specifier = "==2.19.1" }, { name = "setuptools", specifier = ">=70" }, { name = "sqlalchemy", extras = ["asyncio"], specifier = ">=2.0.41" }, @@ -2771,6 +2772,7 @@ requires-dist = [ { name = "tqdm", specifier = ">=4.66.1" }, { name = "trafilatura" }, { name = "turbopuffer", marker = "extra == 'external-tools'", specifier = ">=0.5.17" }, + { name = "ty", specifier = ">=0.0.17" }, { name = "typer", specifier = ">=0.15.2" }, { name = "uvicorn", marker = "extra == 'desktop'", specifier = "==0.29.0" }, { name = "uvicorn", marker = "extra == 'server'", specifier = "==0.29.0" }, @@ -5358,28 +5360,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.12.10" +version = "0.15.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3b/eb/8c073deb376e46ae767f4961390d17545e8535921d2f65101720ed8bd434/ruff-0.12.10.tar.gz", hash = "sha256:189ab65149d11ea69a2d775343adf5f49bb2426fc4780f65ee33b423ad2e47f9", size = 5310076, upload-time = "2025-08-21T18:23:22.595Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/dc/4e6ac71b511b141cf626357a3946679abeba4cf67bc7cc5a17920f31e10d/ruff-0.15.1.tar.gz", hash = "sha256:c590fe13fb57c97141ae975c03a1aedb3d3156030cabd740d6ff0b0d601e203f", size = 4540855, upload-time = "2026-02-12T23:09:09.998Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/24/e7/560d049d15585d6c201f9eeacd2fd130def3741323e5ccf123786e0e3c95/ruff-0.12.10-py3-none-linux_armv6l.whl", hash = "sha256:8b593cb0fb55cc8692dac7b06deb29afda78c721c7ccfed22db941201b7b8f7b", size = 11935161, upload-time = "2025-08-21T18:22:26.965Z" }, - { url = "https://files.pythonhosted.org/packages/d1/b0/ad2464922a1113c365d12b8f80ed70fcfb39764288ac77c995156080488d/ruff-0.12.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ebb7333a45d56efc7c110a46a69a1b32365d5c5161e7244aaf3aa20ce62399c1", size = 12660884, upload-time = "2025-08-21T18:22:30.925Z" }, - { url = "https://files.pythonhosted.org/packages/d7/f1/97f509b4108d7bae16c48389f54f005b62ce86712120fd8b2d8e88a7cb49/ruff-0.12.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d59e58586829f8e4a9920788f6efba97a13d1fa320b047814e8afede381c6839", size = 11872754, upload-time = "2025-08-21T18:22:34.035Z" }, - { url = "https://files.pythonhosted.org/packages/12/ad/44f606d243f744a75adc432275217296095101f83f966842063d78eee2d3/ruff-0.12.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:822d9677b560f1fdeab69b89d1f444bf5459da4aa04e06e766cf0121771ab844", size = 12092276, upload-time = "2025-08-21T18:22:36.764Z" }, - { url = "https://files.pythonhosted.org/packages/06/1f/ed6c265e199568010197909b25c896d66e4ef2c5e1c3808caf461f6f3579/ruff-0.12.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b4a64f4062a50c75019c61c7017ff598cb444984b638511f48539d3a1c98db", size = 11734700, upload-time = "2025-08-21T18:22:39.822Z" }, - { url = "https://files.pythonhosted.org/packages/63/c5/b21cde720f54a1d1db71538c0bc9b73dee4b563a7dd7d2e404914904d7f5/ruff-0.12.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c6f4064c69d2542029b2a61d39920c85240c39837599d7f2e32e80d36401d6e", size = 13468783, upload-time = "2025-08-21T18:22:42.559Z" }, - { url = 
"https://files.pythonhosted.org/packages/02/9e/39369e6ac7f2a1848f22fb0b00b690492f20811a1ac5c1fd1d2798329263/ruff-0.12.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:059e863ea3a9ade41407ad71c1de2badfbe01539117f38f763ba42a1206f7559", size = 14436642, upload-time = "2025-08-21T18:22:45.612Z" }, - { url = "https://files.pythonhosted.org/packages/e3/03/5da8cad4b0d5242a936eb203b58318016db44f5c5d351b07e3f5e211bb89/ruff-0.12.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bef6161e297c68908b7218fa6e0e93e99a286e5ed9653d4be71e687dff101cf", size = 13859107, upload-time = "2025-08-21T18:22:48.886Z" }, - { url = "https://files.pythonhosted.org/packages/19/19/dd7273b69bf7f93a070c9cec9494a94048325ad18fdcf50114f07e6bf417/ruff-0.12.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4f1345fbf8fb0531cd722285b5f15af49b2932742fc96b633e883da8d841896b", size = 12886521, upload-time = "2025-08-21T18:22:51.567Z" }, - { url = "https://files.pythonhosted.org/packages/c0/1d/b4207ec35e7babaee62c462769e77457e26eb853fbdc877af29417033333/ruff-0.12.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f68433c4fbc63efbfa3ba5db31727db229fa4e61000f452c540474b03de52a9", size = 13097528, upload-time = "2025-08-21T18:22:54.609Z" }, - { url = "https://files.pythonhosted.org/packages/ff/00/58f7b873b21114456e880b75176af3490d7a2836033779ca42f50de3b47a/ruff-0.12.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:141ce3d88803c625257b8a6debf4a0473eb6eed9643a6189b68838b43e78165a", size = 13080443, upload-time = "2025-08-21T18:22:57.413Z" }, - { url = "https://files.pythonhosted.org/packages/12/8c/9e6660007fb10189ccb78a02b41691288038e51e4788bf49b0a60f740604/ruff-0.12.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f3fc21178cd44c98142ae7590f42ddcb587b8e09a3b849cbc84edb62ee95de60", size = 11896759, upload-time = "2025-08-21T18:23:00.473Z" }, - { url = 
"https://files.pythonhosted.org/packages/67/4c/6d092bb99ea9ea6ebda817a0e7ad886f42a58b4501a7e27cd97371d0ba54/ruff-0.12.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7d1a4e0bdfafcd2e3e235ecf50bf0176f74dd37902f241588ae1f6c827a36c56", size = 11701463, upload-time = "2025-08-21T18:23:03.211Z" }, - { url = "https://files.pythonhosted.org/packages/59/80/d982c55e91df981f3ab62559371380616c57ffd0172d96850280c2b04fa8/ruff-0.12.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:e67d96827854f50b9e3e8327b031647e7bcc090dbe7bb11101a81a3a2cbf1cc9", size = 12691603, upload-time = "2025-08-21T18:23:06.935Z" }, - { url = "https://files.pythonhosted.org/packages/ad/37/63a9c788bbe0b0850611669ec6b8589838faf2f4f959647f2d3e320383ae/ruff-0.12.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ae479e1a18b439c59138f066ae79cc0f3ee250712a873d00dbafadaad9481e5b", size = 13164356, upload-time = "2025-08-21T18:23:10.225Z" }, - { url = "https://files.pythonhosted.org/packages/47/d4/1aaa7fb201a74181989970ebccd12f88c0fc074777027e2a21de5a90657e/ruff-0.12.10-py3-none-win32.whl", hash = "sha256:9de785e95dc2f09846c5e6e1d3a3d32ecd0b283a979898ad427a9be7be22b266", size = 11896089, upload-time = "2025-08-21T18:23:14.232Z" }, - { url = "https://files.pythonhosted.org/packages/ad/14/2ad38fd4037daab9e023456a4a40ed0154e9971f8d6aed41bdea390aabd9/ruff-0.12.10-py3-none-win_amd64.whl", hash = "sha256:7837eca8787f076f67aba2ca559cefd9c5cbc3a9852fd66186f4201b87c1563e", size = 13004616, upload-time = "2025-08-21T18:23:17.422Z" }, - { url = "https://files.pythonhosted.org/packages/24/3c/21cf283d67af33a8e6ed242396863af195a8a6134ec581524fd22b9811b6/ruff-0.12.10-py3-none-win_arm64.whl", hash = "sha256:cc138cc06ed9d4bfa9d667a65af7172b47840e1a98b02ce7011c391e54635ffc", size = 12074225, upload-time = "2025-08-21T18:23:20.137Z" }, + { url = "https://files.pythonhosted.org/packages/23/bf/e6e4324238c17f9d9120a9d60aa99a7daaa21204c07fcd84e2ef03bb5fd1/ruff-0.15.1-py3-none-linux_armv6l.whl", hash = 
"sha256:b101ed7cf4615bda6ffe65bdb59f964e9f4a0d3f85cbf0e54f0ab76d7b90228a", size = 10367819, upload-time = "2026-02-12T23:09:03.598Z" }, + { url = "https://files.pythonhosted.org/packages/b3/ea/c8f89d32e7912269d38c58f3649e453ac32c528f93bb7f4219258be2e7ed/ruff-0.15.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:939c995e9277e63ea632cc8d3fae17aa758526f49a9a850d2e7e758bfef46602", size = 10798618, upload-time = "2026-02-12T23:09:22.928Z" }, + { url = "https://files.pythonhosted.org/packages/5e/0f/1d0d88bc862624247d82c20c10d4c0f6bb2f346559d8af281674cf327f15/ruff-0.15.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:1d83466455fdefe60b8d9c8df81d3c1bbb2115cede53549d3b522ce2bc703899", size = 10148518, upload-time = "2026-02-12T23:08:58.339Z" }, + { url = "https://files.pythonhosted.org/packages/f5/c8/291c49cefaa4a9248e986256df2ade7add79388fe179e0691be06fae6f37/ruff-0.15.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9457e3c3291024866222b96108ab2d8265b477e5b1534c7ddb1810904858d16", size = 10518811, upload-time = "2026-02-12T23:09:31.865Z" }, + { url = "https://files.pythonhosted.org/packages/c3/1a/f5707440e5ae43ffa5365cac8bbb91e9665f4a883f560893829cf16a606b/ruff-0.15.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92c92b003e9d4f7fbd33b1867bb15a1b785b1735069108dfc23821ba045b29bc", size = 10196169, upload-time = "2026-02-12T23:09:17.306Z" }, + { url = "https://files.pythonhosted.org/packages/2a/ff/26ddc8c4da04c8fd3ee65a89c9fb99eaa5c30394269d424461467be2271f/ruff-0.15.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fe5c41ab43e3a06778844c586251eb5a510f67125427625f9eb2b9526535779", size = 10990491, upload-time = "2026-02-12T23:09:25.503Z" }, + { url = "https://files.pythonhosted.org/packages/fc/00/50920cb385b89413f7cdb4bb9bc8fc59c1b0f30028d8bccc294189a54955/ruff-0.15.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:66a6dd6df4d80dc382c6484f8ce1bcceb55c32e9f27a8b94c32f6c7331bf14fb", size = 11843280, upload-time = "2026-02-12T23:09:19.88Z" }, + { url = "https://files.pythonhosted.org/packages/5d/6d/2f5cad8380caf5632a15460c323ae326f1e1a2b5b90a6ee7519017a017ca/ruff-0.15.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a4a42cbb8af0bda9bcd7606b064d7c0bc311a88d141d02f78920be6acb5aa83", size = 11274336, upload-time = "2026-02-12T23:09:14.907Z" }, + { url = "https://files.pythonhosted.org/packages/a3/1d/5f56cae1d6c40b8a318513599b35ea4b075d7dc1cd1d04449578c29d1d75/ruff-0.15.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ab064052c31dddada35079901592dfba2e05f5b1e43af3954aafcbc1096a5b2", size = 11137288, upload-time = "2026-02-12T23:09:07.475Z" }, + { url = "https://files.pythonhosted.org/packages/cd/20/6f8d7d8f768c93b0382b33b9306b3b999918816da46537d5a61635514635/ruff-0.15.1-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:5631c940fe9fe91f817a4c2ea4e81f47bee3ca4aa646134a24374f3c19ad9454", size = 11070681, upload-time = "2026-02-12T23:08:55.43Z" }, + { url = "https://files.pythonhosted.org/packages/9a/67/d640ac76069f64cdea59dba02af2e00b1fa30e2103c7f8d049c0cff4cafd/ruff-0.15.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:68138a4ba184b4691ccdc39f7795c66b3c68160c586519e7e8444cf5a53e1b4c", size = 10486401, upload-time = "2026-02-12T23:09:27.927Z" }, + { url = "https://files.pythonhosted.org/packages/65/3d/e1429f64a3ff89297497916b88c32a5cc88eeca7e9c787072d0e7f1d3e1e/ruff-0.15.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:518f9af03bfc33c03bdb4cb63fabc935341bb7f54af500f92ac309ecfbba6330", size = 10197452, upload-time = "2026-02-12T23:09:12.147Z" }, + { url = "https://files.pythonhosted.org/packages/78/83/e2c3bade17dad63bf1e1c2ffaf11490603b760be149e1419b07049b36ef2/ruff-0.15.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:da79f4d6a826caaea95de0237a67e33b81e6ec2e25fc7e1993a4015dffca7c61", size = 10693900, upload-time 
= "2026-02-12T23:09:34.418Z" }, + { url = "https://files.pythonhosted.org/packages/a1/27/fdc0e11a813e6338e0706e8b39bb7a1d61ea5b36873b351acee7e524a72a/ruff-0.15.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3dd86dccb83cd7d4dcfac303ffc277e6048600dfc22e38158afa208e8bf94a1f", size = 11227302, upload-time = "2026-02-12T23:09:36.536Z" }, + { url = "https://files.pythonhosted.org/packages/f6/58/ac864a75067dcbd3b95be5ab4eb2b601d7fbc3d3d736a27e391a4f92a5c1/ruff-0.15.1-py3-none-win32.whl", hash = "sha256:660975d9cb49b5d5278b12b03bb9951d554543a90b74ed5d366b20e2c57c2098", size = 10462555, upload-time = "2026-02-12T23:09:29.899Z" }, + { url = "https://files.pythonhosted.org/packages/e0/5e/d4ccc8a27ecdb78116feac4935dfc39d1304536f4296168f91ed3ec00cd2/ruff-0.15.1-py3-none-win_amd64.whl", hash = "sha256:c820fef9dd5d4172a6570e5721704a96c6679b80cf7be41659ed439653f62336", size = 11599956, upload-time = "2026-02-12T23:09:01.157Z" }, + { url = "https://files.pythonhosted.org/packages/2a/07/5bda6a85b220c64c65686bc85bd0bbb23b29c62b3a9f9433fa55f17cda93/ruff-0.15.1-py3-none-win_arm64.whl", hash = "sha256:5ff7d5f0f88567850f45081fac8f4ec212be8d0b963e385c3f7d0d2eb4899416", size = 10874604, upload-time = "2026-02-12T23:09:05.515Z" }, ] [[package]] @@ -5881,6 +5882,30 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b3/26/61f236b52fd5e9161e21e1074c8133c9402945f4cf13612d9f9792ea0b0f/turbopuffer-0.6.5-py3-none-any.whl", hash = "sha256:d0c2261fcce5fa0ae9d82b103c3cf5d90cb2da263b76a41d8f121714f60a4e5c", size = 104879, upload-time = "2025-08-18T20:58:14.171Z" }, ] +[[package]] +name = "ty" +version = "0.0.17" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/c3/41ae6346443eedb65b96761abfab890a48ce2aa5a8a27af69c5c5d99064d/ty-0.0.17.tar.gz", hash = "sha256:847ed6c120913e280bf9b54d8eaa7a1049708acb8824ad234e71498e8ad09f97", size = 5167209, upload-time = "2026-02-13T13:26:36.835Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c0/01/0ef15c22a1c54b0f728ceff3f62d478dbf8b0dcf8ff7b80b954f79584f3e/ty-0.0.17-py3-none-linux_armv6l.whl", hash = "sha256:64a9a16555cc8867d35c2647c2f1afbd3cae55f68fd95283a574d1bb04fe93e0", size = 10192793, upload-time = "2026-02-13T13:27:13.943Z" }, + { url = "https://files.pythonhosted.org/packages/0f/2c/f4c322d9cded56edc016b1092c14b95cf58c8a33b4787316ea752bb9418e/ty-0.0.17-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:eb2dbd8acd5c5a55f4af0d479523e7c7265a88542efe73ed3d696eb1ba7b6454", size = 10051977, upload-time = "2026-02-13T13:26:57.741Z" }, + { url = "https://files.pythonhosted.org/packages/4c/a5/43746c1ff81e784f5fc303afc61fe5bcd85d0fcf3ef65cb2cef78c7486c7/ty-0.0.17-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f18f5fd927bc628deb9ea2df40f06b5f79c5ccf355db732025a3e8e7152801f6", size = 9564639, upload-time = "2026-02-13T13:26:42.781Z" }, + { url = "https://files.pythonhosted.org/packages/d6/b8/280b04e14a9c0474af574f929fba2398b5e1c123c1e7735893b4cd73d13c/ty-0.0.17-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5383814d1d7a5cc53b3b07661856bab04bb2aac7a677c8d33c55169acdaa83df", size = 10061204, upload-time = "2026-02-13T13:27:00.152Z" }, + { url = "https://files.pythonhosted.org/packages/2a/d7/493e1607d8dfe48288d8a768a2adc38ee27ef50e57f0af41ff273987cda0/ty-0.0.17-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9c20423b8744b484f93e7bf2ef8a9724bca2657873593f9f41d08bd9f83444c9", size = 10013116, upload-time = "2026-02-13T13:26:34.543Z" }, + { url = "https://files.pythonhosted.org/packages/80/ef/22f3ed401520afac90dbdf1f9b8b7755d85b0d5c35c1cb35cf5bd11b59c2/ty-0.0.17-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6f5b1aba97db9af86517b911674b02f5bc310750485dc47603a105bd0e83ddd", size = 10533623, upload-time = "2026-02-13T13:26:31.449Z" }, + { url = 
"https://files.pythonhosted.org/packages/75/ce/744b15279a11ac7138832e3a55595706b4a8a209c9f878e3ab8e571d9032/ty-0.0.17-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:488bce1a9bea80b851a97cd34c4d2ffcd69593d6c3f54a72ae02e5c6e47f3d0c", size = 11069750, upload-time = "2026-02-13T13:26:48.638Z" }, + { url = "https://files.pythonhosted.org/packages/f2/be/1133c91f15a0e00d466c24f80df486d630d95d1b2af63296941f7473812f/ty-0.0.17-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8df66b91ec84239420985ec215e7f7549bfda2ac036a3b3c065f119d1c06825a", size = 10870862, upload-time = "2026-02-13T13:26:54.715Z" }, + { url = "https://files.pythonhosted.org/packages/3e/4a/a2ed209ef215b62b2d3246e07e833081e07d913adf7e0448fc204be443d6/ty-0.0.17-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:002139e807c53002790dfefe6e2f45ab0e04012e76db3d7c8286f96ec121af8f", size = 10628118, upload-time = "2026-02-13T13:26:45.439Z" }, + { url = "https://files.pythonhosted.org/packages/b3/0c/87476004cb5228e9719b98afffad82c3ef1f84334bde8527bcacba7b18cb/ty-0.0.17-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6c4e01f05ce82e5d489ab3900ca0899a56c4ccb52659453780c83e5b19e2b64c", size = 10038185, upload-time = "2026-02-13T13:27:02.693Z" }, + { url = "https://files.pythonhosted.org/packages/46/4b/98f0b3ba9aef53c1f0305519536967a4aa793a69ed72677b0a625c5313ac/ty-0.0.17-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2b226dd1e99c0d2152d218c7e440150d1a47ce3c431871f0efa073bbf899e881", size = 10047644, upload-time = "2026-02-13T13:27:05.474Z" }, + { url = "https://files.pythonhosted.org/packages/93/e0/06737bb80aa1a9103b8651d2eb691a7e53f1ed54111152be25f4a02745db/ty-0.0.17-py3-none-musllinux_1_2_i686.whl", hash = "sha256:8b11f1da7859e0ad69e84b3c5ef9a7b055ceed376a432fad44231bdfc48061c2", size = 10231140, upload-time = "2026-02-13T13:27:10.844Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/79/e2a606bd8852383ba9abfdd578f4a227bd18504145381a10a5f886b4e751/ty-0.0.17-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:c04e196809ff570559054d3e011425fd7c04161529eb551b3625654e5f2434cb", size = 10718344, upload-time = "2026-02-13T13:26:51.66Z" }, + { url = "https://files.pythonhosted.org/packages/c5/2d/2663984ac11de6d78f74432b8b14ba64d170b45194312852b7543cf7fd56/ty-0.0.17-py3-none-win32.whl", hash = "sha256:305b6ed150b2740d00a817b193373d21f0767e10f94ac47abfc3b2e5a5aec809", size = 9672932, upload-time = "2026-02-13T13:27:08.522Z" }, + { url = "https://files.pythonhosted.org/packages/de/b5/39be78f30b31ee9f5a585969930c7248354db90494ff5e3d0756560fb731/ty-0.0.17-py3-none-win_amd64.whl", hash = "sha256:531828267527aee7a63e972f54e5eee21d9281b72baf18e5c2850c6b862add83", size = 10542138, upload-time = "2026-02-13T13:27:17.084Z" }, + { url = "https://files.pythonhosted.org/packages/40/b7/f875c729c5d0079640c75bad2c7e5d43edc90f16ba242f28a11966df8f65/ty-0.0.17-py3-none-win_arm64.whl", hash = "sha256:de9810234c0c8d75073457e10a84825b9cd72e6629826b7f01c7a0b266ae25b1", size = 10023068, upload-time = "2026-02-13T13:26:39.637Z" }, +] + [[package]] name = "typeguard" version = "4.4.4"