feat: modal sandbox

This commit is contained in:
Andy Li
2025-07-30 11:25:40 -07:00
committed by GitHub
parent 60a0a5264d
commit 277c26a58e
26 changed files with 529 additions and 197 deletions

View File

@@ -0,0 +1,55 @@
"""support modal sandbox type
Revision ID: 4c6c9ef0387d
Revises: 4537f0996495
Create Date: 2025-07-29 15:10:08.996251
"""
from typing import Sequence, Union
from sqlalchemy import text
from alembic import op
from letta.settings import DatabaseChoice, settings
# revision identifiers, used by Alembic.
revision: str = "4c6c9ef0387d"
down_revision: Union[str, None] = "4537f0996495"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add the 'MODAL' value to the Postgres ``sandboxtype`` enum.

    SQLite stores enum columns as plain strings, so no DDL is needed there.
    """
    if settings.database_engine == DatabaseChoice.POSTGRES:
        # IF NOT EXISTS makes the migration safe to re-run: ALTER TYPE ... ADD VALUE
        # commits outside normal transactional rollback on older Postgres versions,
        # so a partially-applied upgrade would otherwise fail on retry.
        op.execute("ALTER TYPE sandboxtype ADD VALUE IF NOT EXISTS 'MODAL' AFTER 'E2B'")
def downgrade() -> None:
    """Remove 'MODAL' from the ``sandboxtype`` enum (Postgres only).

    Postgres cannot drop a value from an existing enum type, so we create a
    replacement enum without 'MODAL', re-type the column, drop the old type,
    and rename the replacement into place. Rows still using 'MODAL' would make
    the column re-type fail mid-DDL, so we abort with a clear error first.
    """
    if settings.database_engine == DatabaseChoice.POSTGRES:
        connection = op.get_bind()
        row = connection.execute(
            text(
                """
                SELECT COUNT(*)
                FROM sandbox_configs
                WHERE "type" NOT IN ('E2B', 'LOCAL')
                """
            )
        ).fetchone()
        # COUNT(*) always yields one row, but guard against a None result anyway.
        conflict_count = row[0] if row else 0
        if conflict_count:
            # Report the bare count, not the raw Row tuple (previously rendered as "(3,)").
            raise RuntimeError(
                "Cannot downgrade enum: Data conflicts are detected in sandbox_configs.sandboxtype.\n"
                "Please manually handle these records before handling the downgrades.\n"
                f"{conflict_count} invalid sandboxtype values"
            )
        # Postgres does not support dropping enum values. Create a new enum and swap them.
        op.execute("CREATE TYPE sandboxtype_old AS ENUM ('E2B', 'LOCAL')")
        op.execute('ALTER TABLE sandbox_configs ALTER COLUMN "type" TYPE sandboxtype_old USING "type"::text::sandboxtype_old')
        op.execute("DROP TYPE sandboxtype")
        op.execute("ALTER TYPE sandboxtype_old RENAME to sandboxtype")

View File

@@ -19,7 +19,7 @@ from letta.log import get_logger
from letta.orm.enums import ToolType
from letta.otel.tracing import log_event, trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import AgentStepStatus, JobStatus, MessageStreamStatus, ProviderType
from letta.schemas.enums import AgentStepStatus, JobStatus, MessageStreamStatus, ProviderType, SandboxType
from letta.schemas.job import JobUpdate
from letta.schemas.letta_message import LegacyLettaMessage, LettaMessage
from letta.schemas.letta_message_content import OmittedReasoningContent, ReasoningContent, RedactedReasoningContent, TextContent
@@ -28,7 +28,7 @@ from letta.schemas.letta_response import LettaBatchResponse, LettaResponse
from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem
from letta.schemas.message import Message, MessageCreate
from letta.schemas.openai.chat_completion_response import ToolCall as OpenAIToolCall
from letta.schemas.sandbox_config import SandboxConfig, SandboxType
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.server.rest_api.utils import create_heartbeat_system_message, create_letta_messages_from_llm_response

View File

@@ -1,17 +1,7 @@
from collections import OrderedDict
from typing import Any, Dict, Optional
from letta.constants import COMPOSIO_ENTITY_ENV_VAR_KEY, PRE_EXECUTION_MESSAGE_ARG
from letta.functions.ast_parsers import coerce_dict_args_by_annotations, get_function_annotations_from_source
from letta.functions.composio_helpers import execute_composio_action, generate_composio_action_from_func_name
from letta.helpers.composio_helpers import get_composio_api_key
from letta.orm.enums import ToolType
from letta.schemas.agent import AgentState
from letta.schemas.sandbox_config import SandboxRunResult
from letta.schemas.tool import Tool
from letta.schemas.user import User
from letta.services.tool_executor.tool_execution_sandbox import ToolExecutionSandbox
from letta.utils import get_friendly_error_msg
from letta.constants import PRE_EXECUTION_MESSAGE_ARG
def enable_strict_mode(tool_schema: Dict[str, Any]) -> Dict[str, Any]:
@@ -44,6 +34,7 @@ def add_pre_execution_message(tool_schema: Dict[str, Any], description: Optional
Args:
tool_schema (Dict[str, Any]): The original tool schema.
description (Optional[str]): Description of the tool schema. Defaults to None.
Returns:
Dict[str, Any]: A new tool schema with the `pre_execution_message` field added at the beginning.
@@ -117,57 +108,3 @@ def remove_request_heartbeat(tool_schema: Dict[str, Any]) -> Dict[str, Any]:
schema["parameters"] = {**parameters, "properties": properties, "required": required}
return schema
# TODO: Deprecate the `execute_external_tool` function on the agent body
def execute_external_tool(
    agent_state: AgentState,
    function_name: str,
    function_args: dict,
    target_letta_tool: Tool,
    actor: User,
    allow_agent_state_modifications: bool = False,
) -> tuple[Any, Optional[SandboxRunResult]]:
    """Run a non-core tool (Composio action or custom sandboxed tool) and return its result.

    Composio tools are dispatched to the Composio API; custom tools have their
    args type-coerced from the tool's source annotations and run inside a
    ``ToolExecutionSandbox``. Any exception is converted to a friendly error
    string rather than propagated.

    Args:
        agent_state: State of the calling agent; scanned for the Composio entity
            env var and (optionally) deep-copied into the sandbox.
        function_name: Name of the function to execute.
        function_args: Raw argument dict for the tool.
        target_letta_tool: Tool record describing what to run and how.
        actor: User on whose behalf the tool runs (used for API-key lookup).
        allow_agent_state_modifications: If True, a stripped deep copy of the agent
            state is passed into the sandbox so the tool can access memory blocks.

    Returns:
        Tuple of (function response, sandbox run result or None). The sandbox
        result is None for Composio tools and for any execution that raised.
        NOTE(review): tool types other than EXTERNAL_COMPOSIO/CUSTOM fall through
        and implicitly return None — confirm this is intended.
    """
    # TODO: need to have an AgentState object that actually has full access to the block data
    # this is because the sandbox tools need to be able to access block.value to edit this data
    try:
        if target_letta_tool.tool_type == ToolType.EXTERNAL_COMPOSIO:
            action_name = generate_composio_action_from_func_name(target_letta_tool.name)
            # Get entity ID from the agent_state env vars (last matching key wins)
            entity_id = None
            for env_var in agent_state.tool_exec_environment_variables:
                if env_var.key == COMPOSIO_ENTITY_ENV_VAR_KEY:
                    entity_id = env_var.value
            # Get composio_api_key for this actor
            composio_api_key = get_composio_api_key(actor=actor)
            function_response = execute_composio_action(
                action_name=action_name, args=function_args, api_key=composio_api_key, entity_id=entity_id
            )
            return function_response, None
        elif target_letta_tool.tool_type == ToolType.CUSTOM:
            # Parse the source code to extract function annotations
            annotations = get_function_annotations_from_source(target_letta_tool.source_code, function_name)
            # Coerce the function arguments to the correct types based on the annotations
            function_args = coerce_dict_args_by_annotations(function_args, annotations)
            # execute tool in a sandbox
            # TODO: allow agent_state to specify which sandbox to execute tools in
            # TODO: This is only temporary, can remove after we publish a pip package with this object
            if allow_agent_state_modifications:
                # Strip tools/tool_rules from the copy so the sandboxed tool cannot recurse
                agent_state_copy = agent_state.__deepcopy__()
                agent_state_copy.tools = []
                agent_state_copy.tool_rules = []
            else:
                agent_state_copy = None
            tool_execution_result = ToolExecutionSandbox(function_name, function_args, actor).run(agent_state=agent_state_copy)
            function_response, updated_agent_state = tool_execution_result.func_return, tool_execution_result.agent_state
            # TODO: Bring this back
            # if allow_agent_state_modifications and updated_agent_state is not None:
            #     self.update_memory_if_changed(updated_agent_state.memory)
            return function_response, tool_execution_result
    except Exception as e:
        # Catch the error here so the response can still be truncated downstream
        # TODO: modify to function execution error
        function_response = get_friendly_error_msg(function_name=function_name, exception_name=type(e).__name__, exception_message=str(e))
        return function_response, None

View File

@@ -1,15 +0,0 @@
"""__all__ acts as manual import management to avoid collisions and circular imports."""
# from letta.orm.agent import Agent
# from letta.orm.users_agents import UsersAgents
# from letta.orm.blocks_agents import BlocksAgents
# from letta.orm.token import Token
# from letta.orm.source import Source
# from letta.orm.document import Document
# from letta.orm.passage import Passage
# from letta.orm.memory_templates import MemoryTemplate, HumanMemoryTemplate, PersonaMemoryTemplate
# from letta.orm.sources_agents import SourcesAgents
# from letta.orm.tools_agents import ToolsAgents
# from letta.orm.job import Job
# from letta.orm.block import Block
# from letta.orm.message import Message

View File

@@ -17,6 +17,5 @@ class ToolType(str, Enum):
LETTA_BUILTIN = "letta_builtin"
LETTA_FILES_CORE = "letta_files_core"
EXTERNAL_COMPOSIO = "external_composio"
EXTERNAL_LANGCHAIN = "external_langchain"
# TODO is "external" the right name here? Since as of now, MCP is local / doesn't support remote?
EXTERNAL_MCP = "external_mcp"

View File

@@ -8,9 +8,9 @@ from sqlalchemy.orm import Mapped, mapped_column, relationship
from letta.orm.mixins import AgentMixin, OrganizationMixin, SandboxConfigMixin
from letta.orm.sqlalchemy_base import SqlalchemyBase
from letta.schemas.enums import SandboxType
from letta.schemas.environment_variables import SandboxEnvironmentVariable as PydanticSandboxEnvironmentVariable
from letta.schemas.sandbox_config import SandboxConfig as PydanticSandboxConfig
from letta.schemas.sandbox_config import SandboxType
if TYPE_CHECKING:
from letta.orm.agent import Agent

View File

@@ -153,3 +153,9 @@ class DuplicateFileHandling(str, Enum):
SKIP = "skip" # skip files with duplicate names
ERROR = "error" # error when duplicate names are encountered
SUFFIX = "suffix" # add numeric suffix to make names unique (default behavior)
class SandboxType(str, Enum):
    """Supported tool-execution sandbox backends."""

    E2B = "e2b"  # hosted E2B code-interpreter sandbox
    MODAL = "modal"  # Modal-based remote sandbox
    LOCAL = "local"  # local subprocess execution

View File

@@ -1,21 +1,17 @@
import hashlib
import json
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union
from pydantic import BaseModel, Field, model_validator
from letta.constants import LETTA_TOOL_EXECUTION_DIR
from letta.schemas.agent import AgentState
from letta.schemas.enums import SandboxType
from letta.schemas.letta_base import LettaBase, OrmMetadataBase
from letta.schemas.pip_requirement import PipRequirement
from letta.settings import tool_settings
# Sandbox Config
class SandboxType(str, Enum):
E2B = "e2b"
LOCAL = "local"
class SandboxRunResult(BaseModel):
@@ -83,6 +79,15 @@ class E2BSandboxConfig(BaseModel):
return data
class ModalSandboxConfig(BaseModel):
    """Configuration for running tools in a Modal sandbox."""

    timeout: int = Field(5 * 60, description="Time limit for the sandbox (in seconds).")
    pip_requirements: Optional[List[str]] = Field(None, description="A list of pip packages to install in the Modal sandbox")

    @property
    def type(self) -> "SandboxType":
        # Type tag identifying this config variant (mirrors the other sandbox configs).
        return SandboxType.MODAL
class SandboxConfigBase(OrmMetadataBase):
__id_prefix__ = "sandbox"
@@ -99,6 +104,9 @@ class SandboxConfig(SandboxConfigBase):
def get_local_config(self) -> LocalSandboxConfig:
return LocalSandboxConfig(**self.config)
def get_modal_config(self) -> ModalSandboxConfig:
return ModalSandboxConfig(**self.config)
def fingerprint(self) -> str:
# Only take into account type, org_id, and the config items
# Canonicalize input data into JSON with sorted keys
@@ -120,10 +128,12 @@ class SandboxConfig(SandboxConfigBase):
class SandboxConfigCreate(LettaBase):
config: Union[LocalSandboxConfig, E2BSandboxConfig] = Field(..., description="The configuration for the sandbox.")
config: Union[LocalSandboxConfig, E2BSandboxConfig, ModalSandboxConfig] = Field(..., description="The configuration for the sandbox.")
class SandboxConfigUpdate(LettaBase):
"""Pydantic model for updating SandboxConfig fields."""
config: Union[LocalSandboxConfig, E2BSandboxConfig] = Field(None, description="The JSON configuration data for the sandbox.")
config: Union[LocalSandboxConfig, E2BSandboxConfig, ModalSandboxConfig] = Field(
None, description="The JSON configuration data for the sandbox."
)

View File

@@ -5,11 +5,12 @@ from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from letta.log import get_logger
from letta.schemas.enums import SandboxType
from letta.schemas.environment_variables import SandboxEnvironmentVariable as PydanticEnvVar
from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate
from letta.schemas.sandbox_config import LocalSandboxConfig
from letta.schemas.sandbox_config import SandboxConfig as PydanticSandboxConfig
from letta.schemas.sandbox_config import SandboxConfigCreate, SandboxConfigUpdate, SandboxType
from letta.schemas.sandbox_config import SandboxConfigCreate, SandboxConfigUpdate
from letta.server.rest_api.utils import get_letta_server, get_user_id
from letta.server.server import SyncServer
from letta.services.helpers.tool_execution_helper import create_venv_for_local_sandbox, install_pip_requirements_for_sandbox

View File

@@ -40,7 +40,7 @@ from letta.schemas.block import Block, BlockUpdate, CreateBlock
from letta.schemas.embedding_config import EmbeddingConfig
# openai schemas
from letta.schemas.enums import JobStatus, MessageStreamStatus, ProviderCategory, ProviderType
from letta.schemas.enums import JobStatus, MessageStreamStatus, ProviderCategory, ProviderType, SandboxType
from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate
from letta.schemas.group import GroupCreate, ManagerType, SleeptimeManager, VoiceSleeptimeManager
from letta.schemas.job import Job, JobUpdate
@@ -69,7 +69,7 @@ from letta.schemas.providers import (
TogetherProvider,
XAIProvider,
)
from letta.schemas.sandbox_config import LocalSandboxConfig, SandboxConfigCreate, SandboxType
from letta.schemas.sandbox_config import LocalSandboxConfig, SandboxConfigCreate
from letta.schemas.source import Source
from letta.schemas.tool import Tool
from letta.schemas.usage import LettaUsageStatistics

View File

@@ -6,11 +6,12 @@ from letta.orm.errors import NoResultFound
from letta.orm.sandbox_config import SandboxConfig as SandboxConfigModel
from letta.orm.sandbox_config import SandboxEnvironmentVariable as SandboxEnvVarModel
from letta.otel.tracing import trace_method
from letta.schemas.enums import SandboxType
from letta.schemas.environment_variables import SandboxEnvironmentVariable as PydanticEnvVar
from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate
from letta.schemas.sandbox_config import LocalSandboxConfig
from letta.schemas.sandbox_config import SandboxConfig as PydanticSandboxConfig
from letta.schemas.sandbox_config import SandboxConfigCreate, SandboxConfigUpdate, SandboxType
from letta.schemas.sandbox_config import SandboxConfigCreate, SandboxConfigUpdate
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.utils import enforce_types, printd
@@ -493,10 +494,7 @@ class SandboxConfigManager:
self, sandbox_config_id: str, actor: PydanticUser, after: Optional[str] = None, limit: Optional[int] = 50
) -> Dict[str, str]:
env_vars = await self.list_sandbox_env_vars_async(sandbox_config_id, actor, after, limit)
result = {}
for env_var in env_vars:
result[env_var.key] = env_var.value
return result
return {env_var.key: env_var.value for env_var in env_vars}
@enforce_types
@trace_method

View File

@@ -5,6 +5,7 @@ from letta.functions.ast_parsers import coerce_dict_args_by_annotations, get_fun
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import SandboxType
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
@@ -12,6 +13,7 @@ from letta.schemas.user import User
from letta.services.agent_manager import AgentManager
from letta.services.tool_executor.tool_executor_base import ToolExecutor
from letta.services.tool_sandbox.local_sandbox import AsyncToolSandboxLocal
from letta.services.tool_sandbox.modal_sandbox import AsyncToolSandboxModal
from letta.settings import tool_settings
from letta.types import JsonDict
from letta.utils import get_friendly_error_msg
@@ -50,10 +52,14 @@ class SandboxToolExecutor(ToolExecutor):
agent_state_copy = self._create_agent_state_copy(agent_state) if agent_state else None
# Execute in sandbox depending on API key
if tool_settings.e2b_api_key:
if tool_settings.sandbox_type == SandboxType.E2B:
sandbox = AsyncToolSandboxE2B(
function_name, function_args, actor, tool_object=tool, sandbox_config=sandbox_config, sandbox_env_vars=sandbox_env_vars
)
elif tool_settings.sandbox_type == SandboxType.MODAL:
sandbox = AsyncToolSandboxModal(
function_name, function_args, actor, tool_object=tool, sandbox_config=sandbox_config, sandbox_env_vars=sandbox_env_vars
)
else:
sandbox = AsyncToolSandboxLocal(
function_name, function_args, actor, tool_object=tool, sandbox_config=sandbox_config, sandbox_env_vars=sandbox_env_vars
@@ -61,6 +67,9 @@ class SandboxToolExecutor(ToolExecutor):
tool_execution_result = await sandbox.run(agent_state=agent_state_copy)
log_lines = (tool_execution_result.stdout or []) + (tool_execution_result.stderr or [])
logger.debug("Tool execution log: %s", "\n".join(log_lines))
# Verify memory integrity
if agent_state:
new_memory_str = await agent_state.memory.compile_async()

View File

@@ -24,7 +24,7 @@ from letta.services.tool_executor.core_tool_executor import LettaCoreToolExecuto
from letta.services.tool_executor.files_tool_executor import LettaFileToolExecutor
from letta.services.tool_executor.mcp_tool_executor import ExternalMCPToolExecutor
from letta.services.tool_executor.multi_agent_tool_executor import LettaMultiAgentToolExecutor
from letta.services.tool_executor.tool_executor import SandboxToolExecutor
from letta.services.tool_executor.sandbox_tool_executor import SandboxToolExecutor
from letta.services.tool_executor.tool_executor_base import ToolExecutor
from letta.utils import get_friendly_error_msg

View File

@@ -13,7 +13,8 @@ from letta.functions.helpers import generate_model_from_args_json_schema
from letta.log import get_logger
from letta.otel.tracing import log_event, trace_method
from letta.schemas.agent import AgentState
from letta.schemas.sandbox_config import SandboxConfig, SandboxType
from letta.schemas.enums import SandboxType
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User

View File

@@ -143,13 +143,6 @@ class ToolManager:
PydanticTool(tool_type=ToolType.EXTERNAL_COMPOSIO, name=tool_create.json_schema["name"], **tool_create.model_dump()), actor
)
@enforce_types
@trace_method
def create_or_update_langchain_tool(self, tool_create: ToolCreate, actor: PydanticUser) -> PydanticTool:
return self.create_or_update_tool(
PydanticTool(tool_type=ToolType.EXTERNAL_LANGCHAIN, name=tool_create.json_schema["name"], **tool_create.model_dump()), actor
)
@enforce_types
@trace_method
def create_tool(self, pydantic_tool: PydanticTool, actor: PydanticUser) -> PydanticTool:

View File

@@ -1,3 +1,4 @@
import os
import pickle
import uuid
from abc import ABC, abstractmethod
@@ -188,5 +189,20 @@ class AsyncToolSandboxBase(ABC):
"""
return False # Default to False for local execution
    def _update_env_vars(self):
        """Placeholder for refreshing sandbox env vars; not implemented yet."""
        pass  # TODO
async def _gather_env_vars(self, agent_state: AgentState | None, additional_env_vars: dict[str, str], sbx_id: str, is_local: bool):
env = os.environ.copy() if is_local else {}
if self.provided_sandbox_env_vars:
env.update(self.provided_sandbox_env_vars)
else:
env_vars = await self.sandbox_config_manager.get_sandbox_env_vars_as_dict_async(
sandbox_config_id=sbx_id, actor=self.user, limit=None
)
env.update(env_vars)
if agent_state:
env.update(agent_state.get_agent_env_vars_as_dict())
if additional_env_vars:
env.update(additional_env_vars)
return env

View File

@@ -6,7 +6,8 @@ from e2b_code_interpreter import AsyncSandbox
from letta.log import get_logger
from letta.otel.tracing import log_event, trace_method
from letta.schemas.agent import AgentState
from letta.schemas.sandbox_config import SandboxConfig, SandboxType
from letta.schemas.enums import SandboxType
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.services.helpers.tool_parser_helper import parse_stdout_best_effort
@@ -41,22 +42,6 @@ class AsyncToolSandboxE2B(AsyncToolSandboxBase):
self,
agent_state: Optional[AgentState] = None,
additional_env_vars: Optional[Dict] = None,
) -> ToolExecutionResult:
"""
Run the tool in a sandbox environment asynchronously,
*always* using a subprocess for execution.
"""
result = await self.run_e2b_sandbox(agent_state=agent_state, additional_env_vars=additional_env_vars)
# Simple console logging for demonstration
for log_line in (result.stdout or []) + (result.stderr or []):
print(f"Tool execution log: {log_line}")
return result
@trace_method
async def run_e2b_sandbox(
self, agent_state: Optional[AgentState] = None, additional_env_vars: Optional[Dict] = None
) -> ToolExecutionResult:
if self.provided_sandbox_config:
sbx_config = self.provided_sandbox_config
@@ -76,30 +61,15 @@ class AsyncToolSandboxE2B(AsyncToolSandboxBase):
# await sbx.set_timeout(sbx_config.get_e2b_config().timeout)
# Get environment variables for the sandbox
# TODO: We set limit to 100 here, but maybe we want it uncapped? Realistically this should be fine.
env_vars = {}
if self.provided_sandbox_env_vars:
env_vars.update(self.provided_sandbox_env_vars)
else:
db_env_vars = await self.sandbox_config_manager.get_sandbox_env_vars_as_dict_async(
sandbox_config_id=sbx_config.id, actor=self.user, limit=100
)
env_vars.update(db_env_vars)
# Get environment variables for this agent specifically
if agent_state:
env_vars.update(agent_state.get_agent_env_vars_as_dict())
# Finally, get any that are passed explicitly into the `run` function call
if additional_env_vars:
env_vars.update(additional_env_vars)
envs = await self._gather_env_vars(agent_state, additional_env_vars, sbx_config.id, is_local=False)
code = await self.generate_execution_script(agent_state=agent_state)
try:
log_event(
"e2b_execution_started",
{"tool": self.tool_name, "sandbox_id": e2b_sandbox.sandbox_id, "code": code, "env_vars": env_vars},
{"tool": self.tool_name, "sandbox_id": e2b_sandbox.sandbox_id, "code": code, "env_vars": envs},
)
execution = await e2b_sandbox.run_code(code, envs=env_vars)
execution = await e2b_sandbox.run_code(code, envs=envs)
if execution.results:
func_return, agent_state = parse_stdout_best_effort(execution.results[0].text)

View File

@@ -11,7 +11,8 @@ from pydantic.config import JsonDict
from letta.log import get_logger
from letta.otel.tracing import log_event, trace_method
from letta.schemas.agent import AgentState
from letta.schemas.sandbox_config import SandboxConfig, SandboxType
from letta.schemas.enums import SandboxType
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.services.helpers.tool_execution_helper import (
@@ -44,34 +45,16 @@ class AsyncToolSandboxLocal(AsyncToolSandboxBase):
super().__init__(tool_name, args, user, tool_object, sandbox_config=sandbox_config, sandbox_env_vars=sandbox_env_vars)
self.force_recreate_venv = force_recreate_venv
@trace_method
async def run(
self,
agent_state: Optional[AgentState] = None,
additional_env_vars: Optional[Dict] = None,
) -> ToolExecutionResult:
"""
Run the tool in a sandbox environment asynchronously,
*always* using a subprocess for execution.
Run the tool in a local sandbox environment asynchronously.
Uses a subprocess for multi-core parallelism.
"""
result = await self.run_local_dir_sandbox(agent_state=agent_state, additional_env_vars=additional_env_vars)
# Simple console logging for demonstration
for log_line in (result.stdout or []) + (result.stderr or []):
print(f"Tool execution log: {log_line}")
return result
@trace_method
async def run_local_dir_sandbox(
self,
agent_state: Optional[AgentState],
additional_env_vars: Optional[Dict],
) -> ToolExecutionResult:
"""
Unified asynchronous method to run the tool in a local sandbox environment,
always via subprocess for multi-core parallelism.
"""
# Get sandbox configuration
if self.provided_sandbox_config:
sbx_config = self.provided_sandbox_config
else:

View File

@@ -0,0 +1,205 @@
from typing import Any, Dict, Optional
import modal
from letta.log import get_logger
from letta.otel.tracing import log_event, trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import SandboxType
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.services.helpers.tool_parser_helper import parse_stdout_best_effort
from letta.services.tool_sandbox.base import AsyncToolSandboxBase
from letta.settings import tool_settings
from letta.types import JsonDict
from letta.utils import get_friendly_error_msg
logger = get_logger(__name__)
class AsyncToolSandboxModal(AsyncToolSandboxBase):
    """Tool sandbox that executes generated tool scripts remotely via Modal."""

    def __init__(
        self,
        tool_name: str,
        args: JsonDict,
        user,
        tool_object: Tool | None = None,
        sandbox_config: SandboxConfig | None = None,
        sandbox_env_vars: dict[str, Any] | None = None,
    ):
        """Initialize the Modal sandbox wrapper.

        Raises:
            ValueError: if ``tool_settings.modal_api_key`` is not configured.
        """
        super().__init__(tool_name, args, user, tool_object, sandbox_config=sandbox_config, sandbox_env_vars=sandbox_env_vars)
        if not tool_settings.modal_api_key:
            raise ValueError("Modal API key is required but not set in tool_settings.modal_api_key")
        # Create a unique app name based on tool and config
        self._app_name = self._generate_app_name()

    def _generate_app_name(self) -> str:
        """Generate a unique app name based on tool and configuration. Created based on tool name and org"""
        return f"{self.user.organization_id}-{self.tool_name}"

    async def _fetch_or_create_modal_app(self, sbx_config: SandboxConfig, env_vars: Dict[str, str]) -> modal.App:
        """Create a Modal app with the tool function registered.

        NOTE(review): the ``env_vars`` parameter is unused here — env vars are
        passed at call time via the remote function instead; confirm and drop.
        """
        app = await modal.App.lookup.aio(self._app_name)
        modal_config = sbx_config.get_modal_config()
        # Get the base image with dependencies
        image = self._get_modal_image(sbx_config)

        # Decorator for the tool, note information on running untrusted code: https://modal.com/docs/guide/restricted-access
        # The `@app.function` decorator must apply to functions in global scope, unless `serialized=True` is set.
        @app.function(image=image, timeout=modal_config.timeout, restrict_modal_access=True, max_inputs=1, serialized=True)
        def execute_tool_with_script(execution_script: str, environment_vars: dict[str, str]):
            """Execute the generated tool script in Modal sandbox."""
            import os

            # Note: We pass environment variables directly instead of relying on Modal secrets
            # This is more flexible and doesn't require pre-configured secrets
            for key, value in environment_vars.items():
                os.environ[key] = str(value)
            # SECURITY: exec() runs the generated script verbatim inside the
            # restricted Modal container (restrict_modal_access above).
            exec_globals = {}
            exec(execution_script, exec_globals)
            # NOTE(review): this function has no return statement, so the caller
            # receives None from .remote() — verify how `result` below is populated.

        # Store the function reference in the app for later use
        app.remote_executor = execute_tool_with_script
        return app

    @trace_method
    async def run(
        self,
        agent_state: Optional[AgentState] = None,
        additional_env_vars: Optional[Dict] = None,
    ) -> ToolExecutionResult:
        """Run the tool remotely on Modal and return the parsed execution result.

        Any failure (remote tool error or local exception) is converted into an
        error-status ToolExecutionResult with a friendly message; nothing raises.
        """
        if self.provided_sandbox_config:
            sbx_config = self.provided_sandbox_config
        else:
            sbx_config = await self.sandbox_config_manager.get_or_create_default_sandbox_config_async(
                sandbox_type=SandboxType.MODAL, actor=self.user
            )
        envs = await self._gather_env_vars(agent_state, additional_env_vars or {}, sbx_config.id, is_local=False)
        # Generate execution script (this includes the tool source code and execution logic)
        execution_script = await self.generate_execution_script(agent_state=agent_state)
        try:
            # Log only the env var names, never the values.
            log_event(
                "modal_execution_started",
                {"tool": self.tool_name, "app_name": self._app_name, "env_vars": list(envs)},
            )
            # Create Modal app with the tool function registered
            app = await self._fetch_or_create_modal_app(sbx_config, envs)
            # Execute the tool remotely
            # NOTE(review): app.run() / .remote() are synchronous and will block
            # the event loop inside this async method — confirm whether the
            # async Modal client APIs should be used here.
            with app.run():
                result = app.remote_executor.remote(execution_script, envs)
            # Process the result
            if result["error"]:
                logger.error(
                    f"Executing tool {self.tool_name} raised a {result['error']['name']} with message: \n{result['error']['value']}"
                )
                logger.error(f"Traceback from Modal sandbox: \n{result['error']['traceback']}")
                func_return = get_friendly_error_msg(
                    function_name=self.tool_name, exception_name=result["error"]["name"], exception_message=result["error"]["value"]
                )
                log_event(
                    "modal_execution_failed",
                    {
                        "tool": self.tool_name,
                        "app_name": self._app_name,
                        "error_type": result["error"]["name"],
                        "error_message": result["error"]["value"],
                        "func_return": func_return,
                    },
                )
                # Parse the result from stdout even if there was an error
                # (in case the function returned something before failing)
                agent_state = None  # Initialize agent_state
                try:
                    func_return_parsed, agent_state_parsed = parse_stdout_best_effort(result["stdout"])
                    if func_return_parsed is not None:
                        func_return = func_return_parsed
                        agent_state = agent_state_parsed
                except Exception:
                    # If parsing fails, keep the error message
                    pass
            else:
                func_return, agent_state = parse_stdout_best_effort(result["stdout"])
                log_event(
                    "modal_execution_succeeded",
                    {
                        "tool": self.tool_name,
                        "app_name": self._app_name,
                        "func_return": func_return,
                    },
                )
            return ToolExecutionResult(
                func_return=func_return,
                agent_state=agent_state,
                stdout=[result["stdout"]] if result["stdout"] else [],
                stderr=[result["stderr"]] if result["stderr"] else [],
                status="error" if result["error"] else "success",
                sandbox_config_fingerprint=sbx_config.fingerprint(),
            )
        except Exception as e:
            # Local/transport failure: surface a friendly message instead of raising.
            logger.error(f"Modal execution for tool {self.tool_name} encountered an error: {e}")
            func_return = get_friendly_error_msg(
                function_name=self.tool_name,
                exception_name=type(e).__name__,
                exception_message=str(e),
            )
            log_event(
                "modal_execution_error",
                {
                    "tool": self.tool_name,
                    "app_name": self._app_name,
                    "error": str(e),
                    "func_return": func_return,
                },
            )
            return ToolExecutionResult(
                func_return=func_return,
                agent_state=None,
                stdout=[],
                stderr=[str(e)],
                status="error",
                sandbox_config_fingerprint=sbx_config.fingerprint(),
            )

    def _get_modal_image(self, sbx_config: SandboxConfig) -> modal.Image:
        """Get Modal image with required public python dependencies.

        Caching and rebuilding is handled in a cascading manner
        https://modal.com/docs/guide/images#image-caching-and-rebuilds
        """
        image = modal.Image.debian_slim(python_version="3.12")
        # `letta` itself is always required by the generated execution script.
        all_requirements = ["letta"]
        # Add sandbox-specific pip requirements
        modal_configs = sbx_config.get_modal_config()
        if modal_configs.pip_requirements:
            all_requirements.extend([str(req) for req in modal_configs.pip_requirements])
        # Add tool-specific pip requirements
        if self.tool and self.tool.pip_requirements:
            all_requirements.extend([str(req) for req in self.tool.pip_requirements])
        if all_requirements:
            image = image.pip_install(*all_requirements)
        return image

    def use_top_level_await(self) -> bool:
        """
        Modal functions don't have an active event loop by default,
        so we should use asyncio.run() like local execution.
        """
        return False

View File

@@ -7,21 +7,22 @@ from pydantic import AliasChoices, Field
from pydantic_settings import BaseSettings, SettingsConfigDict
from letta.local_llm.constants import DEFAULT_WRAPPER_NAME, INNER_THOUGHTS_KWARG
from letta.schemas.enums import SandboxType
from letta.services.summarizer.enums import SummarizationMode
class ToolSettings(BaseSettings):
composio_api_key: Optional[str] = None
composio_api_key: str | None = Field(default=None, description="API key for Composio")
# E2B Sandbox configurations
e2b_api_key: Optional[str] = None
e2b_sandbox_template_id: Optional[str] = None # Updated manually
# Sandbox Configurations
e2b_api_key: str | None = Field(default=None, description="API key for using E2B as a tool sandbox")
e2b_sandbox_template_id: str | None = Field(default=None, description="Template ID for E2B Sandbox. Updated Manually.")
# Tavily search
tavily_api_key: Optional[str] = None
modal_api_key: str | None = Field(default=None, description="API key for using Modal as a tool sandbox")
# Firecrawl search
firecrawl_api_key: Optional[str] = None
# Search Providers
tavily_api_key: str | None = Field(default=None, description="API key for using Tavily as a search provider.")
firecrawl_api_key: str | None = Field(default=None, description="API key for using Firecrawl as a search provider.")
# Local Sandbox configurations
tool_exec_dir: Optional[str] = None
@@ -36,6 +37,15 @@ class ToolSettings(BaseSettings):
mcp_read_from_config: bool = False # if False, will throw if attempting to read/write from file
mcp_disable_stdio: bool = False
@property
def sandbox_type(self) -> SandboxType:
    """Pick the sandbox backend from the configured API keys.

    Precedence: E2B wins when its key is set, Modal is used next, and the
    local sandbox is the fallback when no cloud sandbox key is configured.
    """
    if self.e2b_api_key:
        return SandboxType.E2B
    if self.modal_api_key:
        return SandboxType.MODAL
    return SandboxType.LOCAL
class SummarizerSettings(BaseSettings):
model_config = SettingsConfigDict(env_prefix="letta_summarizer_", extra="ignore")

View File

@@ -1190,7 +1190,7 @@ async def get_latest_alembic_revision() -> str:
return "unknown"
except Exception as e:
logger.error(f"Error getting latest alembic revision: {e}")
logger.error("Error getting latest alembic revision: %s", e)
return "unknown"

172
poetry.lock generated
View File

@@ -1136,14 +1136,14 @@ files = [
[[package]]
name = "click"
version = "8.2.1"
version = "8.1.8"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.10"
python-versions = ">=3.7"
groups = ["main", "dev"]
files = [
{file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"},
{file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"},
{file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
{file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
]
[package.dependencies]
@@ -2808,6 +2808,26 @@ grpcio = ">=1.71.2"
protobuf = ">=5.26.1,<6.0dev"
setuptools = "*"
[[package]]
name = "grpclib"
version = "0.4.8"
description = "Pure-Python gRPC implementation for asyncio"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"cloud-tool-sandbox\""
files = [
{file = "grpclib-0.4.8-py3-none-any.whl", hash = "sha256:a5047733a7acc1c1cee6abf3c841c7c6fab67d2844a45a853b113fa2e6cd2654"},
{file = "grpclib-0.4.8.tar.gz", hash = "sha256:d8823763780ef94fed8b2c562f7485cf0bbee15fc7d065a640673667f7719c9a"},
]
[package.dependencies]
h2 = ">=3.1.0,<5"
multidict = "*"
[package.extras]
protobuf = ["protobuf (>=3.20.0)"]
[[package]]
name = "h11"
version = "0.16.0"
@@ -2820,6 +2840,36 @@ files = [
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
[[package]]
name = "h2"
version = "4.2.0"
description = "Pure-Python HTTP/2 protocol implementation"
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"cloud-tool-sandbox\""
files = [
{file = "h2-4.2.0-py3-none-any.whl", hash = "sha256:479a53ad425bb29af087f3458a61d30780bc818e4ebcf01f0b536ba916462ed0"},
{file = "h2-4.2.0.tar.gz", hash = "sha256:c8a52129695e88b1a0578d8d2cc6842bbd79128ac685463b887ee278126ad01f"},
]
[package.dependencies]
hpack = ">=4.1,<5"
hyperframe = ">=6.1,<7"
[[package]]
name = "hpack"
version = "4.1.0"
description = "Pure-Python HPACK header encoding"
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"cloud-tool-sandbox\""
files = [
{file = "hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496"},
{file = "hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca"},
]
[[package]]
name = "html2text"
version = "2020.1.16"
@@ -2922,6 +2972,19 @@ files = [
[package.dependencies]
pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""}
[[package]]
name = "hyperframe"
version = "6.1.0"
description = "Pure-Python HTTP/2 framing"
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"cloud-tool-sandbox\""
files = [
{file = "hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5"},
{file = "hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08"},
]
[[package]]
name = "identify"
version = "2.6.12"
@@ -4499,6 +4562,34 @@ typing-inspection = ">=0.4.0"
agents = ["authlib (>=1.5.2,<2.0)", "griffe (>=1.7.3,<2.0)", "mcp (>=1.0,<2.0) ; python_version >= \"3.10\""]
gcp = ["google-auth (>=2.27.0)", "requests (>=2.32.3)"]
[[package]]
name = "modal"
version = "1.1.0"
description = "Python client library for Modal"
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"cloud-tool-sandbox\""
files = [
{file = "modal-1.1.0-py3-none-any.whl", hash = "sha256:985f47427f214e098768995782fda3915141af5f84007815db0c10c45e4a6a16"},
{file = "modal-1.1.0.tar.gz", hash = "sha256:190ea96d45fbdfd6d6cb545a736bf1ef5599511d346ae94cfd773528a33b6097"},
]
[package.dependencies]
aiohttp = "*"
certifi = "*"
click = ">=8.1.0,<8.2.0"
grpclib = ">=0.4.7,<0.4.9"
protobuf = ">=3.19,<4.24.0 || >4.24.0,<7.0"
rich = ">=12.0.0"
synchronicity = ">=0.10.1,<0.11.0"
toml = "*"
typer = ">=0.9"
types-certifi = "*"
types-toml = "*"
typing_extensions = ">=4.6,<5.0"
watchfiles = "*"
[[package]]
name = "mpmath"
version = "1.3.0"
@@ -7374,6 +7465,26 @@ files = [
{file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"},
]
[[package]]
name = "sigtools"
version = "4.0.1"
description = "Utilities for working with inspect.Signature objects."
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"cloud-tool-sandbox\""
files = [
{file = "sigtools-4.0.1-py2.py3-none-any.whl", hash = "sha256:d216b4cf920bbab0fce636ddc429ed8463a5b533d9e1492acb45a2a1bc36ac6c"},
{file = "sigtools-4.0.1.tar.gz", hash = "sha256:4b8e135a9cd4d2ea00da670c093372d74e672ba3abb87f4c98d8e73dea54445c"},
]
[package.dependencies]
attrs = "*"
[package.extras]
test = ["coverage", "mock", "repeated-test (>=2.2.1)", "sphinx"]
tests = ["coverage", "mock", "repeated-test (>=2.2.1)", "sphinx"]
[[package]]
name = "simple-websocket"
version = "1.1.0"
@@ -7709,6 +7820,23 @@ mpmath = ">=1.1.0,<1.4"
[package.extras]
dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"]
[[package]]
name = "synchronicity"
version = "0.10.1"
description = "Export blocking and async library versions from a single async implementation"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"cloud-tool-sandbox\""
files = [
{file = "synchronicity-0.10.1-py3-none-any.whl", hash = "sha256:af9c077586cf4895aea88fe9104d966f50b8fac730f79117383591acb5489952"},
{file = "synchronicity-0.10.1.tar.gz", hash = "sha256:4af861f215a11b885e18cf2985ba8b3a1aa0000d440a9b615402f724a453a8c2"},
]
[package.dependencies]
sigtools = ">=4.0.1"
typing-extensions = ">=4.12.2"
[[package]]
name = "tavily-python"
version = "0.7.10"
@@ -7807,10 +7935,10 @@ files = [
name = "toml"
version = "0.10.2"
description = "Python Library for Tom's Obvious, Minimal Language"
optional = false
optional = true
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
groups = ["main"]
markers = "python_version == \"3.10\""
markers = "extra == \"cloud-tool-sandbox\" or python_version == \"3.10\""
files = [
{file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
@@ -7938,6 +8066,32 @@ rich = ">=10.11.0"
shellingham = ">=1.3.0"
typing-extensions = ">=3.7.4.3"
[[package]]
name = "types-certifi"
version = "2021.10.8.3"
description = "Typing stubs for certifi"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"cloud-tool-sandbox\""
files = [
{file = "types-certifi-2021.10.8.3.tar.gz", hash = "sha256:72cf7798d165bc0b76e1c10dd1ea3097c7063c42c21d664523b928e88b554a4f"},
{file = "types_certifi-2021.10.8.3-py3-none-any.whl", hash = "sha256:b2d1e325e69f71f7c78e5943d410e650b4707bb0ef32e4ddf3da37f54176e88a"},
]
[[package]]
name = "types-toml"
version = "0.10.8.20240310"
description = "Typing stubs for toml"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"cloud-tool-sandbox\""
files = [
{file = "types-toml-0.10.8.20240310.tar.gz", hash = "sha256:3d41501302972436a6b8b239c850b26689657e25281b48ff0ec06345b8830331"},
{file = "types_toml-0.10.8.20240310-py3-none-any.whl", hash = "sha256:627b47775d25fa29977d9c70dc0cbab3f314f32c8d8d0c012f2ef5de7aaec05d"},
]
[[package]]
name = "typing-extensions"
version = "4.14.1"
@@ -8145,7 +8299,7 @@ description = "Simple, modern and high performance file watching and code reload
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"experimental\" or extra == \"all\""
markers = "extra == \"experimental\" or extra == \"all\" or extra == \"cloud-tool-sandbox\""
files = [
{file = "watchfiles-1.1.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:27f30e14aa1c1e91cb653f03a63445739919aef84c8d2517997a83155e7a2fcc"},
{file = "watchfiles-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3366f56c272232860ab45c77c3ca7b74ee819c8e1f6f35a7125556b198bbc6df"},
@@ -8851,7 +9005,7 @@ cffi = ["cffi (>=1.11)"]
[extras]
all = ["autoflake", "black", "docker", "fastapi", "google-cloud-profiler", "granian", "isort", "langchain", "langchain-community", "locust", "pexpect", "pg8000", "pgvector", "pinecone", "pre-commit", "psycopg2", "psycopg2-binary", "pyright", "pytest-asyncio", "pytest-order", "redis", "uvicorn", "uvloop", "wikipedia"]
bedrock = ["aioboto3", "boto3"]
cloud-tool-sandbox = ["e2b-code-interpreter"]
cloud-tool-sandbox = ["e2b-code-interpreter", "modal"]
desktop = ["docker", "fastapi", "langchain", "langchain-community", "locust", "pyright", "sqlite-vec", "uvicorn", "wikipedia"]
dev = ["autoflake", "black", "isort", "locust", "pexpect", "pre-commit", "pyright", "pytest-asyncio", "pytest-order"]
experimental = ["google-cloud-profiler", "granian", "uvloop"]
@@ -8866,4 +9020,4 @@ tests = ["wikipedia"]
[metadata]
lock-version = "2.1"
python-versions = "<3.14,>=3.10"
content-hash = "6ed92bd1cd08f0365c819c140b131634763d0f35035ad32c536c397cd99f0259"
content-hash = "6511c865acfab5a77a5c4a3952b48e403cfdc02ddc7210e08349c4bb5d93295f"

View File

@@ -104,6 +104,7 @@ markitdown = {extras = ["docx", "pdf", "pptx"], version = "^0.1.2"}
google-cloud-profiler = {version = "^4.1.0", optional = true}
sqlite-vec = {version = "^0.1.7a2", optional = true}
orjson = "^3.11.1"
modal = {version = "^1.1.0", optional = true}
[tool.poetry.extras]
@@ -113,7 +114,7 @@ pinecone = ["pinecone"]
dev = ["pytest", "pytest-asyncio", "pexpect", "black", "pre-commit", "pyright", "pytest-order", "autoflake", "isort", "locust"]
experimental = ["uvloop", "granian", "google-cloud-profiler"]
server = ["websockets", "fastapi", "uvicorn"]
cloud-tool-sandbox = ["e2b-code-interpreter"]
cloud-tool-sandbox = ["e2b-code-interpreter", "modal"]
external-tools = ["docker", "langchain", "wikipedia", "langchain-community", "firecrawl-py"]
tests = ["wikipedia"]
bedrock = ["boto3", "aioboto3"]

View File

@@ -526,10 +526,10 @@ async def test_local_sandbox_default(disable_e2b_api_key, add_integers_tool, tes
args = {"x": 10, "y": 5}
# Mock and assert correct pathway was invoked
with patch.object(AsyncToolSandboxLocal, "run_local_dir_sandbox") as mock_run_local_dir_sandbox:
with patch.object(AsyncToolSandboxLocal, "run") as mock_run:
sandbox = AsyncToolSandboxLocal(add_integers_tool.name, args, user=test_user)
await sandbox.run()
mock_run_local_dir_sandbox.assert_called_once()
mock_run.assert_called_once()
# Run again to get actual response
sandbox = AsyncToolSandboxLocal(add_integers_tool.name, args, user=test_user)
@@ -731,10 +731,10 @@ async def test_e2b_sandbox_default(check_e2b_key_is_set, add_integers_tool, test
args = {"x": 10, "y": 5}
# Mock and assert correct pathway was invoked
with patch.object(AsyncToolSandboxE2B, "run_e2b_sandbox") as mock_run_local_dir_sandbox:
with patch.object(AsyncToolSandboxE2B, "run") as mock_run:
sandbox = AsyncToolSandboxE2B(add_integers_tool.name, args, user=test_user)
await sandbox.run()
mock_run_local_dir_sandbox.assert_called_once()
mock_run.assert_called_once()
# Run again to get actual response
sandbox = AsyncToolSandboxE2B(add_integers_tool.name, args, user=test_user)

View File

@@ -54,7 +54,7 @@ from letta.schemas.agent import CreateAgent, UpdateAgent
from letta.schemas.block import Block as PydanticBlock
from letta.schemas.block import BlockUpdate, CreateBlock
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import ActorType, AgentStepStatus, FileProcessingStatus, JobStatus, JobType, MessageRole, ProviderType
from letta.schemas.enums import ActorType, AgentStepStatus, FileProcessingStatus, JobStatus, JobType, MessageRole, ProviderType, SandboxType
from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate
from letta.schemas.file import FileMetadata
from letta.schemas.file import FileMetadata as PydanticFileMetadata
@@ -76,7 +76,7 @@ from letta.schemas.organization import OrganizationUpdate
from letta.schemas.passage import Passage as PydanticPassage
from letta.schemas.pip_requirement import PipRequirement
from letta.schemas.run import Run as PydanticRun
from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate, SandboxType
from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate
from letta.schemas.source import Source as PydanticSource
from letta.schemas.source import SourceUpdate
from letta.schemas.tool import Tool as PydanticTool
@@ -3560,9 +3560,9 @@ def test_update_tool_by_id(server: SyncServer, print_tool, default_user):
assert updated_tool.tool_type == ToolType.CUSTOM
# Dangerous: we bypass safety to give it another tool type
server.tool_manager.update_tool_by_id(print_tool.id, tool_update, actor=default_user, updated_tool_type=ToolType.EXTERNAL_LANGCHAIN)
server.tool_manager.update_tool_by_id(print_tool.id, tool_update, actor=default_user, updated_tool_type=ToolType.EXTERNAL_MCP)
updated_tool = server.tool_manager.get_tool_by_id(print_tool.id, actor=default_user)
assert updated_tool.tool_type == ToolType.EXTERNAL_LANGCHAIN
assert updated_tool.tool_type == ToolType.EXTERNAL_MCP
def test_update_tool_source_code_refreshes_schema_and_name(server: SyncServer, print_tool, default_user):

View File

@@ -14,11 +14,10 @@ import letta.utils as utils
from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS, LETTA_DIR, LETTA_TOOL_EXECUTION_DIR
from letta.orm import Provider, ProviderTrace, Step
from letta.schemas.block import CreateBlock
from letta.schemas.enums import MessageRole, ProviderCategory, ProviderType
from letta.schemas.enums import MessageRole, ProviderCategory, ProviderType, SandboxType
from letta.schemas.letta_message import LettaMessage, ReasoningMessage, SystemMessage, ToolCallMessage, ToolReturnMessage, UserMessage
from letta.schemas.llm_config import LLMConfig
from letta.schemas.providers import ProviderCreate
from letta.schemas.sandbox_config import SandboxType
from letta.schemas.user import User
from letta.server.db import db_registry