diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 288a92f0..dade61ca 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,23 +13,13 @@ repos: hooks: - id: trufflehog name: TruffleHog - entry: bash -c 'trufflehog git file://. --since-commit HEAD --results=verified,unknown --fail' + entry: bash -c 'trufflehog git file://. --since-commit HEAD --results=verified,unknown --fail --no-update' language: system stages: ["pre-commit", "pre-push"] - - id: autoflake - name: autoflake - entry: bash -c '[ -d "apps/core" ] && cd apps/core; uv run autoflake --remove-all-unused-imports --remove-unused-variables --in-place --recursive --ignore-init-module-imports .' - language: system - types: [python] - - id: isort - name: isort - entry: bash -c '[ -d "apps/core" ] && cd apps/core; uv run isort --profile black .' - language: system - types: [python] - exclude: ^docs/ - - id: black - name: black - entry: bash -c '[ -d "apps/core" ] && cd apps/core; uv run black --line-length 140 --target-version py310 --target-version py311 .' 
- language: system - types: [python] - exclude: ^docs/ + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.12.11 + hooks: + - id: ruff-check + args: [ --fix ] + - id: ruff-format diff --git a/alembic/versions/220856bbf43b_add_read_only_column.py b/alembic/versions/220856bbf43b_add_read_only_column.py index ed4bdb39..52d0b89e 100644 --- a/alembic/versions/220856bbf43b_add_read_only_column.py +++ b/alembic/versions/220856bbf43b_add_read_only_column.py @@ -28,7 +28,7 @@ def upgrade() -> None: # add default value of `False` op.add_column("block", sa.Column("read_only", sa.Boolean(), nullable=True)) op.execute( - f""" + """ UPDATE block SET read_only = False """ diff --git a/alembic/versions/88f9432739a9_add_jobtype_to_job_table.py b/alembic/versions/88f9432739a9_add_jobtype_to_job_table.py index 91301f7e..a097c3a4 100644 --- a/alembic/versions/88f9432739a9_add_jobtype_to_job_table.py +++ b/alembic/versions/88f9432739a9_add_jobtype_to_job_table.py @@ -29,7 +29,7 @@ def upgrade() -> None: op.add_column("jobs", sa.Column("job_type", sa.String(), nullable=True)) # Set existing rows to have the default value of JobType.JOB - op.execute(f"UPDATE jobs SET job_type = 'job' WHERE job_type IS NULL") + op.execute("UPDATE jobs SET job_type = 'job' WHERE job_type IS NULL") # Make the column non-nullable after setting default values op.alter_column("jobs", "job_type", existing_type=sa.String(), nullable=False) diff --git a/alembic/versions/bdddd421ec41_add_privileged_tools_to_organization.py b/alembic/versions/bdddd421ec41_add_privileged_tools_to_organization.py index 5fb09eed..cfd4b7e4 100644 --- a/alembic/versions/bdddd421ec41_add_privileged_tools_to_organization.py +++ b/alembic/versions/bdddd421ec41_add_privileged_tools_to_organization.py @@ -30,7 +30,7 @@ def upgrade() -> None: # fill in column with `False` op.execute( - f""" + """ UPDATE organizations SET privileged_tools = False """ diff --git a/alembic/versions/e20573fe9b86_add_tool_types.py 
b/alembic/versions/e20573fe9b86_add_tool_types.py index 3ec94685..afb2822d 100644 --- a/alembic/versions/e20573fe9b86_add_tool_types.py +++ b/alembic/versions/e20573fe9b86_add_tool_types.py @@ -42,7 +42,7 @@ def upgrade() -> None: f""" UPDATE tools SET tool_type = '{letta_core_value}' - WHERE name IN ({','.join(f"'{name}'" for name in BASE_TOOLS)}); + WHERE name IN ({",".join(f"'{name}'" for name in BASE_TOOLS)}); """ ) @@ -50,7 +50,7 @@ def upgrade() -> None: f""" UPDATE tools SET tool_type = '{letta_memory_core_value}' - WHERE name IN ({','.join(f"'{name}'" for name in BASE_MEMORY_TOOLS)}); + WHERE name IN ({",".join(f"'{name}'" for name in BASE_MEMORY_TOOLS)}); """ ) diff --git a/alembic/versions/e991d2e3b428_add_monotonically_increasing_ids_to_.py b/alembic/versions/e991d2e3b428_add_monotonically_increasing_ids_to_.py index 2b2370ef..6a63e0f3 100644 --- a/alembic/versions/e991d2e3b428_add_monotonically_increasing_ids_to_.py +++ b/alembic/versions/e991d2e3b428_add_monotonically_increasing_ids_to_.py @@ -71,7 +71,7 @@ def upgrade() -> None: WITH numbered_rows AS ( SELECT id, - ROW_NUMBER() OVER (ORDER BY {', '.join(ORDERING_COLUMNS)} ASC) as rn + ROW_NUMBER() OVER (ORDER BY {", ".join(ORDERING_COLUMNS)} ASC) as rn FROM {TABLE_NAME} ) UPDATE {TABLE_NAME} diff --git a/letta/agent.py b/letta/agent.py index bf9e5e7a..cbfa7f6c 100644 --- a/letta/agent.py +++ b/letta/agent.py @@ -49,9 +49,7 @@ from letta.schemas.enums import MessageRole, ProviderType, StepStatus, ToolType from letta.schemas.letta_message_content import ImageContent, TextContent from letta.schemas.memory import ContextWindowOverview, Memory from letta.schemas.message import Message, MessageCreate, ToolReturn -from letta.schemas.openai.chat_completion_response import ChatCompletionResponse -from letta.schemas.openai.chat_completion_response import Message as ChatCompletionMessage -from letta.schemas.openai.chat_completion_response import UsageStatistics +from letta.schemas.openai.chat_completion_response 
import ChatCompletionResponse, Message as ChatCompletionMessage, UsageStatistics from letta.schemas.response_format import ResponseFormatType from letta.schemas.tool import Tool from letta.schemas.tool_execution_result import ToolExecutionResult @@ -871,7 +869,6 @@ class Agent(BaseAgent): ) -> AgentStepResponse: """Runs a single step in the agent loop (generates at most one LLM call)""" try: - # Extract job_id from metadata if present job_id = metadata.get("job_id") if metadata else None @@ -1084,9 +1081,9 @@ class Agent(BaseAgent): -> agent.step(messages=[Message(role='user', text=...)]) """ # Wrap with metadata, dumps to JSON - assert user_message_str and isinstance( - user_message_str, str - ), f"user_message_str should be a non-empty string, got {type(user_message_str)}" + assert user_message_str and isinstance(user_message_str, str), ( + f"user_message_str should be a non-empty string, got {type(user_message_str)}" + ) user_message_json_str = package_user_message(user_message_str, self.agent_state.timezone) # Validate JSON via save/load diff --git a/letta/agents/letta_agent.py b/letta/agents/letta_agent.py index 6adf8597..7e0019ea 100644 --- a/letta/agents/letta_agent.py +++ b/letta/agents/letta_agent.py @@ -269,16 +269,20 @@ class LettaAgent(BaseAgent): effective_step_id = step_id if logged_step else None try: - request_data, response_data, current_in_context_messages, new_in_context_messages, valid_tool_names = ( - await self._build_and_request_from_llm( - current_in_context_messages, - new_in_context_messages, - agent_state, - llm_client, - tool_rules_solver, - agent_step_span, - step_metrics, - ) + ( + request_data, + response_data, + current_in_context_messages, + new_in_context_messages, + valid_tool_names, + ) = await self._build_and_request_from_llm( + current_in_context_messages, + new_in_context_messages, + agent_state, + llm_client, + tool_rules_solver, + agent_step_span, + step_metrics, ) in_context_messages = current_in_context_messages + 
new_in_context_messages @@ -574,16 +578,20 @@ class LettaAgent(BaseAgent): effective_step_id = step_id if logged_step else None try: - request_data, response_data, current_in_context_messages, new_in_context_messages, valid_tool_names = ( - await self._build_and_request_from_llm( - current_in_context_messages, - new_in_context_messages, - agent_state, - llm_client, - tool_rules_solver, - agent_step_span, - step_metrics, - ) + ( + request_data, + response_data, + current_in_context_messages, + new_in_context_messages, + valid_tool_names, + ) = await self._build_and_request_from_llm( + current_in_context_messages, + new_in_context_messages, + agent_state, + llm_client, + tool_rules_solver, + agent_step_span, + step_metrics, ) in_context_messages = current_in_context_messages + new_in_context_messages @@ -1626,7 +1634,6 @@ class LettaAgent(BaseAgent): tool_rules_solver: ToolRulesSolver, is_final_step: bool | None, ) -> tuple[bool, str | None, LettaStopReason | None]: - continue_stepping = request_heartbeat heartbeat_reason: str | None = None stop_reason: LettaStopReason | None = None @@ -1658,9 +1665,7 @@ class LettaAgent(BaseAgent): uncalled = tool_rules_solver.get_uncalled_required_tools(available_tools=set([t.name for t in agent_state.tools])) if not continue_stepping and uncalled: continue_stepping = True - heartbeat_reason = ( - f"{NON_USER_MSG_PREFIX}Continuing, user expects these tools: [" f"{', '.join(uncalled)}] to be called still." - ) + heartbeat_reason = f"{NON_USER_MSG_PREFIX}Continuing, user expects these tools: [{', '.join(uncalled)}] to be called still." 
stop_reason = None # reset – we’re still going diff --git a/letta/client/streaming.py b/letta/client/streaming.py index e45a36aa..9154051a 100644 --- a/letta/client/streaming.py +++ b/letta/client/streaming.py @@ -23,7 +23,6 @@ def _sse_post(url: str, data: dict, headers: dict) -> Generator[Union[LettaStrea # TODO: Please note his is a very generous timeout for e2b reasons with httpx.Client(timeout=httpx.Timeout(5 * 60.0, read=5 * 60.0)) as client: with connect_sse(client, method="POST", url=url, json=data, headers=headers) as event_source: - # Check for immediate HTTP errors before processing the SSE stream if not event_source.response.is_success: response_bytes = event_source.response.read() diff --git a/letta/functions/schema_generator.py b/letta/functions/schema_generator.py index c6d579bf..8c657f43 100644 --- a/letta/functions/schema_generator.py +++ b/letta/functions/schema_generator.py @@ -593,7 +593,6 @@ def generate_tool_schema_for_mcp( append_heartbeat: bool = True, strict: bool = False, ) -> Dict[str, Any]: - # MCP tool.inputSchema is a JSON schema # https://github.com/modelcontextprotocol/python-sdk/blob/775f87981300660ee957b63c2a14b448ab9c3675/src/mcp/types.py#L678 parameters_schema = mcp_tool.inputSchema diff --git a/letta/helpers/converters.py b/letta/helpers/converters.py index b2a43986..d2fc323a 100644 --- a/letta/helpers/converters.py +++ b/letta/helpers/converters.py @@ -2,8 +2,7 @@ from typing import Any, Dict, List, Optional, Union import numpy as np from anthropic.types.beta.messages import BetaMessageBatch, BetaMessageBatchIndividualResponse -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall -from openai.types.chat.chat_completion_message_tool_call import Function as OpenAIFunction +from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction from sqlalchemy import Dialect from 
letta.functions.mcp_client.types import StdioServerConfig diff --git a/letta/helpers/datetime_helpers.py b/letta/helpers/datetime_helpers.py index 789b34e2..d4e783f8 100644 --- a/letta/helpers/datetime_helpers.py +++ b/letta/helpers/datetime_helpers.py @@ -1,7 +1,6 @@ import re import time -from datetime import datetime, timedelta -from datetime import timezone as dt_timezone +from datetime import datetime, timedelta, timezone as dt_timezone from typing import Callable import pytz diff --git a/letta/helpers/pinecone_utils.py b/letta/helpers/pinecone_utils.py index e014061b..f2958e8e 100644 --- a/letta/helpers/pinecone_utils.py +++ b/letta/helpers/pinecone_utils.py @@ -317,7 +317,6 @@ async def list_pinecone_index_for_files(file_id: str, actor: User, limit: int = async with PineconeAsyncio(api_key=settings.pinecone_api_key) as pc: description = await pc.describe_index(name=settings.pinecone_source_index) async with pc.IndexAsyncio(host=description.index.host) as dense_index: - kwargs = {"namespace": namespace, "prefix": file_id} if limit is not None: kwargs["limit"] = limit diff --git a/letta/interface.py b/letta/interface.py index ca9eea12..733aaddb 100644 --- a/letta/interface.py +++ b/letta/interface.py @@ -198,23 +198,23 @@ class CLIInterface(AgentInterface): try: msg_dict = eval(function_args) if function_name == "archival_memory_search": - output = f'\tquery: {msg_dict["query"]}, page: {msg_dict["page"]}' + output = f"\tquery: {msg_dict['query']}, page: {msg_dict['page']}" if STRIP_UI: print(output) else: print(f"{Fore.RED}{output}{Style.RESET_ALL}") elif function_name == "archival_memory_insert": - output = f'\t→ {msg_dict["content"]}' + output = f"\t→ {msg_dict['content']}" if STRIP_UI: print(output) else: print(f"{Style.BRIGHT}{Fore.RED}{output}{Style.RESET_ALL}") else: if STRIP_UI: - print(f'\t {msg_dict["old_content"]}\n\t→ {msg_dict["new_content"]}') + print(f"\t {msg_dict['old_content']}\n\t→ {msg_dict['new_content']}") else: print( - 
f'{Style.BRIGHT}\t{Fore.RED} {msg_dict["old_content"]}\n\t{Fore.GREEN}→ {msg_dict["new_content"]}{Style.RESET_ALL}' + f"{Style.BRIGHT}\t{Fore.RED} {msg_dict['old_content']}\n\t{Fore.GREEN}→ {msg_dict['new_content']}{Style.RESET_ALL}" ) except Exception as e: printd(str(e)) @@ -223,7 +223,7 @@ class CLIInterface(AgentInterface): print_function_message("🧠", f"searching memory with {function_name}") try: msg_dict = eval(function_args) - output = f'\tquery: {msg_dict["query"]}, page: {msg_dict["page"]}' + output = f"\tquery: {msg_dict['query']}, page: {msg_dict['page']}" if STRIP_UI: print(output) else: diff --git a/letta/interfaces/openai_streaming_interface.py b/letta/interfaces/openai_streaming_interface.py index c4231cba..7bf3a1a3 100644 --- a/letta/interfaces/openai_streaming_interface.py +++ b/letta/interfaces/openai_streaming_interface.py @@ -232,16 +232,13 @@ class OpenAIStreamingInterface: # If we have main_json, we should output a ToolCallMessage elif updates_main_json: - # If there's something in the function_name buffer, we should release it first # NOTE: we could output it as part of a chunk that has both name and args, # however the frontend may expect name first, then args, so to be # safe we'll output name first in a separate chunk if self.function_name_buffer: - # use_assisitant_message means that we should also not release main_json raw, and instead should only release the contents of "message": "..." 
if self.use_assistant_message and self.function_name_buffer == self.assistant_message_tool_name: - # Store the ID of the tool call so allow skipping the corresponding response if self.function_id_buffer: self.prev_assistant_message_id = self.function_id_buffer @@ -373,7 +370,6 @@ class OpenAIStreamingInterface: # clear buffers self.function_id_buffer = None else: - # There may be a buffer from a previous chunk, for example # if the previous chunk had arguments but we needed to flush name if self.function_args_buffer: diff --git a/letta/llm_api/anthropic_client.py b/letta/llm_api/anthropic_client.py index 6950213f..e8335495 100644 --- a/letta/llm_api/anthropic_client.py +++ b/letta/llm_api/anthropic_client.py @@ -5,8 +5,7 @@ from typing import Dict, List, Optional, Union import anthropic from anthropic import AsyncStream -from anthropic.types.beta import BetaMessage as AnthropicMessage -from anthropic.types.beta import BetaRawMessageStreamEvent +from anthropic.types.beta import BetaMessage as AnthropicMessage, BetaRawMessageStreamEvent from anthropic.types.beta.message_create_params import MessageCreateParamsNonStreaming from anthropic.types.beta.messages import BetaMessageBatch from anthropic.types.beta.messages.batch_create_params import Request @@ -34,9 +33,14 @@ from letta.otel.tracing import trace_method from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message as PydanticMessage from letta.schemas.openai.chat_completion_request import Tool as OpenAITool -from letta.schemas.openai.chat_completion_response import ChatCompletionResponse, Choice, FunctionCall -from letta.schemas.openai.chat_completion_response import Message as ChoiceMessage -from letta.schemas.openai.chat_completion_response import ToolCall, UsageStatistics +from letta.schemas.openai.chat_completion_response import ( + ChatCompletionResponse, + Choice, + FunctionCall, + Message as ChoiceMessage, + ToolCall, + UsageStatistics, +) from letta.settings import 
model_settings DUMMY_FIRST_USER_MESSAGE = "User initializing bootup sequence." @@ -45,7 +49,6 @@ logger = get_logger(__name__) class AnthropicClient(LLMClientBase): - @trace_method @deprecated("Synchronous version of this is no longer valid. Will result in model_dump of coroutine") def request(self, request_data: dict, llm_config: LLMConfig) -> dict: diff --git a/letta/llm_api/azure_client.py b/letta/llm_api/azure_client.py index cb8ad834..63977557 100644 --- a/letta/llm_api/azure_client.py +++ b/letta/llm_api/azure_client.py @@ -13,7 +13,6 @@ from letta.settings import model_settings class AzureClient(OpenAIClient): - def get_byok_overrides(self, llm_config: LLMConfig) -> Tuple[Optional[str], Optional[str], Optional[str]]: if llm_config.provider_category == ProviderCategory.byok: from letta.services.provider_manager import ProviderManager diff --git a/letta/llm_api/bedrock_client.py b/letta/llm_api/bedrock_client.py index cd929fb0..c89b57ff 100644 --- a/letta/llm_api/bedrock_client.py +++ b/letta/llm_api/bedrock_client.py @@ -16,7 +16,6 @@ logger = get_logger(__name__) class BedrockClient(AnthropicClient): - async def get_byok_overrides_async(self, llm_config: LLMConfig) -> tuple[str, str, str]: override_access_key_id, override_secret_access_key, override_default_region = None, None, None if llm_config.provider_category == ProviderCategory.byok: diff --git a/letta/llm_api/deepseek_client.py b/letta/llm_api/deepseek_client.py index 4e678f60..7d02fcd9 100644 --- a/letta/llm_api/deepseek_client.py +++ b/letta/llm_api/deepseek_client.py @@ -11,11 +11,18 @@ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk from letta.llm_api.openai_client import OpenAIClient from letta.otel.tracing import trace_method from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage -from letta.schemas.message import Message as _Message -from letta.schemas.openai.chat_completion_request import AssistantMessage, 
ChatCompletionRequest, ChatMessage -from letta.schemas.openai.chat_completion_request import FunctionCall as ToolFunctionChoiceFunctionCall -from letta.schemas.openai.chat_completion_request import Tool, ToolFunctionChoice, ToolMessage, UserMessage, cast_message_to_subtype +from letta.schemas.message import Message as PydanticMessage, Message as _Message +from letta.schemas.openai.chat_completion_request import ( + AssistantMessage, + ChatCompletionRequest, + ChatMessage, + FunctionCall as ToolFunctionChoiceFunctionCall, + Tool, + ToolFunctionChoice, + ToolMessage, + UserMessage, + cast_message_to_subtype, +) from letta.schemas.openai.chat_completion_response import ChatCompletionResponse from letta.schemas.openai.openai import Function, ToolCall from letta.settings import model_settings @@ -313,7 +320,6 @@ def convert_deepseek_response_to_chatcompletion( class DeepseekClient(OpenAIClient): - def requires_auto_tool_choice(self, llm_config: LLMConfig) -> bool: return False diff --git a/letta/llm_api/google_vertex_client.py b/letta/llm_api/google_vertex_client.py index a51b7e59..42cbf5de 100644 --- a/letta/llm_api/google_vertex_client.py +++ b/letta/llm_api/google_vertex_client.py @@ -31,7 +31,6 @@ logger = get_logger(__name__) class GoogleVertexClient(LLMClientBase): - def _get_client(self): timeout_ms = int(settings.llm_request_timeout_seconds * 1000) return genai.Client( @@ -344,9 +343,9 @@ class GoogleVertexClient(LLMClientBase): if llm_config.put_inner_thoughts_in_kwargs: from letta.local_llm.constants import INNER_THOUGHTS_KWARG_VERTEX - assert ( - INNER_THOUGHTS_KWARG_VERTEX in function_args - ), f"Couldn't find inner thoughts in function args:\n{function_call}" + assert INNER_THOUGHTS_KWARG_VERTEX in function_args, ( + f"Couldn't find inner thoughts in function args:\n{function_call}" + ) inner_thoughts = function_args.pop(INNER_THOUGHTS_KWARG_VERTEX) assert inner_thoughts is not None, f"Expected non-null inner thoughts function arg:\n{function_call}" else: 
@@ -380,9 +379,9 @@ class GoogleVertexClient(LLMClientBase): if llm_config.put_inner_thoughts_in_kwargs: from letta.local_llm.constants import INNER_THOUGHTS_KWARG_VERTEX - assert ( - INNER_THOUGHTS_KWARG_VERTEX in function_args - ), f"Couldn't find inner thoughts in function args:\n{function_call}" + assert INNER_THOUGHTS_KWARG_VERTEX in function_args, ( + f"Couldn't find inner thoughts in function args:\n{function_call}" + ) inner_thoughts = function_args.pop(INNER_THOUGHTS_KWARG_VERTEX) assert inner_thoughts is not None, f"Expected non-null inner thoughts function arg:\n{function_call}" else: @@ -406,7 +405,7 @@ class GoogleVertexClient(LLMClientBase): except json.decoder.JSONDecodeError: if candidate.finish_reason == "MAX_TOKENS": - raise ValueError(f"Could not parse response data from LLM: exceeded max token limit") + raise ValueError("Could not parse response data from LLM: exceeded max token limit") # Inner thoughts are the content by default inner_thoughts = response_message.text @@ -463,7 +462,7 @@ class GoogleVertexClient(LLMClientBase): ) else: # Count it ourselves - assert input_messages is not None, f"Didn't get UsageMetadata from the API response, so input_messages is required" + assert input_messages is not None, "Didn't get UsageMetadata from the API response, so input_messages is required" prompt_tokens = count_tokens(json_dumps(input_messages)) # NOTE: this is a very rough approximation completion_tokens = count_tokens(json_dumps(openai_response_message.model_dump())) # NOTE: this is also approximate total_tokens = prompt_tokens + completion_tokens diff --git a/letta/llm_api/groq_client.py b/letta/llm_api/groq_client.py index 02ddb98a..25d7aaaf 100644 --- a/letta/llm_api/groq_client.py +++ b/letta/llm_api/groq_client.py @@ -14,7 +14,6 @@ from letta.settings import model_settings class GroqClient(OpenAIClient): - def requires_auto_tool_choice(self, llm_config: LLMConfig) -> bool: return False diff --git a/letta/llm_api/mistral.py 
b/letta/llm_api/mistral.py index 68bb87e9..8d5b8b10 100644 --- a/letta/llm_api/mistral.py +++ b/letta/llm_api/mistral.py @@ -13,7 +13,7 @@ async def mistral_get_model_list_async(url: str, api_key: str) -> dict: if api_key is not None: headers["Authorization"] = f"Bearer {api_key}" - logger.debug(f"Sending request to %s", url) + logger.debug("Sending request to %s", url) async with aiohttp.ClientSession() as session: # TODO add query param "tool" to be true diff --git a/letta/llm_api/openai.py b/letta/llm_api/openai.py index 39852be8..b113b7cd 100644 --- a/letta/llm_api/openai.py +++ b/letta/llm_api/openai.py @@ -21,11 +21,15 @@ from letta.local_llm.utils import num_tokens_from_functions, num_tokens_from_mes from letta.log import get_logger from letta.otel.tracing import log_event from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as _Message -from letta.schemas.message import MessageRole as _MessageRole -from letta.schemas.openai.chat_completion_request import ChatCompletionRequest -from letta.schemas.openai.chat_completion_request import FunctionCall as ToolFunctionChoiceFunctionCall -from letta.schemas.openai.chat_completion_request import FunctionSchema, Tool, ToolFunctionChoice, cast_message_to_subtype +from letta.schemas.message import Message as _Message, MessageRole as _MessageRole +from letta.schemas.openai.chat_completion_request import ( + ChatCompletionRequest, + FunctionCall as ToolFunctionChoiceFunctionCall, + FunctionSchema, + Tool, + ToolFunctionChoice, + cast_message_to_subtype, +) from letta.schemas.openai.chat_completion_response import ( ChatCompletionChunkResponse, ChatCompletionResponse, diff --git a/letta/llm_api/openai_client.py b/letta/llm_api/openai_client.py index 267898bf..fb541210 100644 --- a/letta/llm_api/openai_client.py +++ b/letta/llm_api/openai_client.py @@ -29,11 +29,14 @@ from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.letta_message_content import 
MessageContentType from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message as PydanticMessage -from letta.schemas.openai.chat_completion_request import ChatCompletionRequest -from letta.schemas.openai.chat_completion_request import FunctionCall as ToolFunctionChoiceFunctionCall -from letta.schemas.openai.chat_completion_request import FunctionSchema -from letta.schemas.openai.chat_completion_request import Tool as OpenAITool -from letta.schemas.openai.chat_completion_request import ToolFunctionChoice, cast_message_to_subtype +from letta.schemas.openai.chat_completion_request import ( + ChatCompletionRequest, + FunctionCall as ToolFunctionChoiceFunctionCall, + FunctionSchema, + Tool as OpenAITool, + ToolFunctionChoice, + cast_message_to_subtype, +) from letta.schemas.openai.chat_completion_response import ChatCompletionResponse from letta.settings import model_settings diff --git a/letta/llm_api/together_client.py b/letta/llm_api/together_client.py index d9a098b0..98ebf768 100644 --- a/letta/llm_api/together_client.py +++ b/letta/llm_api/together_client.py @@ -12,7 +12,6 @@ from letta.settings import model_settings class TogetherClient(OpenAIClient): - def requires_auto_tool_choice(self, llm_config: LLMConfig) -> bool: return True diff --git a/letta/llm_api/xai_client.py b/letta/llm_api/xai_client.py index 059073e4..b9d37a95 100644 --- a/letta/llm_api/xai_client.py +++ b/letta/llm_api/xai_client.py @@ -14,7 +14,6 @@ from letta.settings import model_settings class XAIClient(OpenAIClient): - def requires_auto_tool_choice(self, llm_config: LLMConfig) -> bool: return False diff --git a/letta/local_llm/chat_completion_proxy.py b/letta/local_llm/chat_completion_proxy.py index 214e0487..ba0ab45a 100644 --- a/letta/local_llm/chat_completion_proxy.py +++ b/letta/local_llm/chat_completion_proxy.py @@ -205,7 +205,7 @@ def get_chat_completion( raise LocalLLMError(f"usage dict in response was missing fields ({usage})") if usage["prompt_tokens"] 
is None: - printd(f"usage dict was missing prompt_tokens, computing on-the-fly...") + printd("usage dict was missing prompt_tokens, computing on-the-fly...") usage["prompt_tokens"] = count_tokens(prompt) # NOTE: we should compute on-the-fly anyways since we might have to correct for errors during JSON parsing @@ -220,7 +220,7 @@ def get_chat_completion( # NOTE: this is the token count that matters most if usage["total_tokens"] is None: - printd(f"usage dict was missing total_tokens, computing on-the-fly...") + printd("usage dict was missing total_tokens, computing on-the-fly...") usage["total_tokens"] = usage["prompt_tokens"] + usage["completion_tokens"] # unpack with response.choices[0].message.content @@ -261,9 +261,9 @@ def generate_grammar_and_documentation( ): from letta.utils import printd - assert not ( - add_inner_thoughts_top_level and add_inner_thoughts_param_level - ), "Can only place inner thoughts in one location in the grammar generator" + assert not (add_inner_thoughts_top_level and add_inner_thoughts_param_level), ( + "Can only place inner thoughts in one location in the grammar generator" + ) grammar_function_models = [] # create_dynamic_model_from_function will add inner thoughts to the function parameters if add_inner_thoughts is True. 
diff --git a/letta/local_llm/settings/settings.py b/letta/local_llm/settings/settings.py index 9efbcf6a..3671e30b 100644 --- a/letta/local_llm/settings/settings.py +++ b/letta/local_llm/settings/settings.py @@ -46,7 +46,7 @@ def get_completions_settings(defaults="simple") -> dict: with open(settings_file, "r", encoding="utf-8") as file: user_settings = json.load(file) if len(user_settings) > 0: - printd(f"Updating base settings with the following user settings:\n{json_dumps(user_settings,indent=2)}") + printd(f"Updating base settings with the following user settings:\n{json_dumps(user_settings, indent=2)}") settings.update(user_settings) else: printd(f"'{settings_file}' was empty, ignoring...") diff --git a/letta/orm/agent.py b/letta/orm/agent.py index e90ec5b8..1b6eeb03 100644 --- a/letta/orm/agent.py +++ b/letta/orm/agent.py @@ -13,8 +13,7 @@ from letta.orm.identity import Identity from letta.orm.mixins import OrganizationMixin, ProjectMixin, TemplateEntityMixin, TemplateMixin from letta.orm.organization import Organization from letta.orm.sqlalchemy_base import SqlalchemyBase -from letta.schemas.agent import AgentState as PydanticAgentState -from letta.schemas.agent import AgentType, get_prompt_template_for_agent_type +from letta.schemas.agent import AgentState as PydanticAgentState, AgentType, get_prompt_template_for_agent_type from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.llm_config import LLMConfig from letta.schemas.memory import Memory diff --git a/letta/orm/block.py b/letta/orm/block.py index d17e89c5..0d3d1605 100644 --- a/letta/orm/block.py +++ b/letta/orm/block.py @@ -8,8 +8,7 @@ from letta.orm.block_history import BlockHistory from letta.orm.blocks_agents import BlocksAgents from letta.orm.mixins import OrganizationMixin, ProjectMixin, TemplateEntityMixin, TemplateMixin from letta.orm.sqlalchemy_base import SqlalchemyBase -from letta.schemas.block import Block as PydanticBlock -from letta.schemas.block import Human, 
Persona +from letta.schemas.block import Block as PydanticBlock, Human, Persona if TYPE_CHECKING: from letta.orm import Organization diff --git a/letta/orm/block_history.py b/letta/orm/block_history.py index 20828124..9819e447 100644 --- a/letta/orm/block_history.py +++ b/letta/orm/block_history.py @@ -38,7 +38,9 @@ class BlockHistory(OrganizationMixin, SqlalchemyBase): # Relationships block_id: Mapped[str] = mapped_column( - String, ForeignKey("block.id", ondelete="CASCADE"), nullable=False # History deleted if Block is deleted + String, + ForeignKey("block.id", ondelete="CASCADE"), + nullable=False, # History deleted if Block is deleted ) sequence_number: Mapped[int] = mapped_column( diff --git a/letta/orm/group.py b/letta/orm/group.py index e819ec12..f01e8357 100644 --- a/letta/orm/group.py +++ b/letta/orm/group.py @@ -10,7 +10,6 @@ from letta.schemas.group import Group as PydanticGroup class Group(SqlalchemyBase, OrganizationMixin, ProjectMixin, TemplateMixin): - __tablename__ = "groups" __pydantic_model__ = PydanticGroup diff --git a/letta/orm/identity.py b/letta/orm/identity.py index 0d4f13ca..75a90525 100644 --- a/letta/orm/identity.py +++ b/letta/orm/identity.py @@ -7,8 +7,7 @@ from sqlalchemy.orm import Mapped, mapped_column, relationship from letta.orm.mixins import OrganizationMixin, ProjectMixin from letta.orm.sqlalchemy_base import SqlalchemyBase -from letta.schemas.identity import Identity as PydanticIdentity -from letta.schemas.identity import IdentityProperty +from letta.schemas.identity import Identity as PydanticIdentity, IdentityProperty class Identity(SqlalchemyBase, OrganizationMixin, ProjectMixin): diff --git a/letta/orm/job.py b/letta/orm/job.py index fb349170..37edc701 100644 --- a/letta/orm/job.py +++ b/letta/orm/job.py @@ -7,8 +7,7 @@ from sqlalchemy.orm import Mapped, mapped_column, relationship from letta.orm.mixins import UserMixin from letta.orm.sqlalchemy_base import SqlalchemyBase from letta.schemas.enums import JobStatus, JobType 
-from letta.schemas.job import Job as PydanticJob -from letta.schemas.job import LettaRequestConfig +from letta.schemas.job import Job as PydanticJob, LettaRequestConfig if TYPE_CHECKING: from letta.orm.job_messages import JobMessage diff --git a/letta/orm/llm_batch_items.py b/letta/orm/llm_batch_items.py index ee696905..b4f08cb0 100644 --- a/letta/orm/llm_batch_items.py +++ b/letta/orm/llm_batch_items.py @@ -9,8 +9,7 @@ from letta.orm.custom_columns import AgentStepStateColumn, BatchRequestResultCol from letta.orm.mixins import AgentMixin, OrganizationMixin from letta.orm.sqlalchemy_base import SqlalchemyBase from letta.schemas.enums import AgentStepStatus, JobStatus -from letta.schemas.llm_batch_job import AgentStepState -from letta.schemas.llm_batch_job import LLMBatchItem as PydanticLLMBatchItem +from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem as PydanticLLMBatchItem from letta.schemas.llm_config import LLMConfig diff --git a/letta/orm/message.py b/letta/orm/message.py index 5e96790d..c331c593 100644 --- a/letta/orm/message.py +++ b/letta/orm/message.py @@ -7,10 +7,8 @@ from sqlalchemy.orm import Mapped, Session, mapped_column, relationship from letta.orm.custom_columns import MessageContentColumn, ToolCallColumn, ToolReturnColumn from letta.orm.mixins import AgentMixin, OrganizationMixin from letta.orm.sqlalchemy_base import SqlalchemyBase -from letta.schemas.letta_message_content import MessageContent -from letta.schemas.letta_message_content import TextContent as PydanticTextContent -from letta.schemas.message import Message as PydanticMessage -from letta.schemas.message import ToolReturn +from letta.schemas.letta_message_content import MessageContent, TextContent as PydanticTextContent +from letta.schemas.message import Message as PydanticMessage, ToolReturn from letta.settings import DatabaseChoice, settings diff --git a/letta/orm/sandbox_config.py b/letta/orm/sandbox_config.py index b8d6ba57..a3d22b18 100644 --- 
a/letta/orm/sandbox_config.py +++ b/letta/orm/sandbox_config.py @@ -1,9 +1,7 @@ import uuid from typing import TYPE_CHECKING, Dict, List, Optional -from sqlalchemy import JSON -from sqlalchemy import Enum as SqlEnum -from sqlalchemy import Index, String, UniqueConstraint +from sqlalchemy import JSON, Enum as SqlEnum, Index, String, UniqueConstraint from sqlalchemy.orm import Mapped, mapped_column, relationship from letta.orm.mixins import AgentMixin, OrganizationMixin, SandboxConfigMixin diff --git a/letta/plugins/plugins.py b/letta/plugins/plugins.py index 6530d205..602599dd 100644 --- a/letta/plugins/plugins.py +++ b/letta/plugins/plugins.py @@ -37,7 +37,7 @@ def get_plugin(plugin_type: str): return plugin elif type(plugin).__name__ == "class": if plugin_register["protocol"] and not isinstance(plugin, type(plugin_register["protocol"])): - raise TypeError(f'{plugin} does not implement {type(plugin_register["protocol"]).__name__}') + raise TypeError(f"{plugin} does not implement {type(plugin_register['protocol']).__name__}") return plugin() raise TypeError("Unknown plugin type") diff --git a/letta/prompts/prompt_generator.py b/letta/prompts/prompt_generator.py index ffb36b05..267cf7c1 100644 --- a/letta/prompts/prompt_generator.py +++ b/letta/prompts/prompt_generator.py @@ -9,7 +9,6 @@ from letta.schemas.memory import Memory class PromptGenerator: - # TODO: This code is kind of wonky and deserves a rewrite @trace_method @staticmethod diff --git a/letta/schemas/embedding_config.py b/letta/schemas/embedding_config.py index 2840146e..a2694f12 100644 --- a/letta/schemas/embedding_config.py +++ b/letta/schemas/embedding_config.py @@ -43,7 +43,6 @@ class EmbeddingConfig(BaseModel): @classmethod def default_config(cls, model_name: Optional[str] = None, provider: Optional[str] = None): - if model_name == "text-embedding-ada-002" and provider == "openai": return cls( embedding_model="text-embedding-ada-002", diff --git a/letta/schemas/message.py b/letta/schemas/message.py 
index 2b294dad..851d733f 100644 --- a/letta/schemas/message.py +++ b/letta/schemas/message.py @@ -9,8 +9,7 @@ from collections import OrderedDict from datetime import datetime, timezone from typing import Any, Dict, List, Literal, Optional, Union -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall -from openai.types.chat.chat_completion_message_tool_call import Function as OpenAIFunction +from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction from pydantic import BaseModel, Field, field_validator from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, TOOL_CALL_ID_MAX_LEN @@ -880,7 +879,6 @@ class Message(BaseMessage): # Tool calling if self.tool_calls is not None: for tool_call in self.tool_calls: - if put_inner_thoughts_in_kwargs: tool_call_input = add_inner_thoughts_to_tool_call( tool_call, @@ -1021,7 +1019,7 @@ class Message(BaseMessage): assert self.tool_call_id is not None, vars(self) if self.name is None: - warnings.warn(f"Couldn't find function name on tool call, defaulting to tool ID instead.") + warnings.warn("Couldn't find function name on tool call, defaulting to tool ID instead.") function_name = self.tool_call_id else: function_name = self.name diff --git a/letta/schemas/providers/bedrock.py b/letta/schemas/providers/bedrock.py index d8ebf1cb..94b0ffa9 100644 --- a/letta/schemas/providers/bedrock.py +++ b/letta/schemas/providers/bedrock.py @@ -35,7 +35,7 @@ class BedrockProvider(Provider): response = await bedrock.list_inference_profiles() return response["inferenceProfileSummaries"] except Exception as e: - logger.error(f"Error getting model list for bedrock: %s", e) + logger.error("Error getting model list for bedrock: %s", e) raise e async def check_api_key(self): diff --git a/letta/schemas/providers/openai.py b/letta/schemas/providers/openai.py index 0ca67652..d4f2fce9 100644 --- 
a/letta/schemas/providers/openai.py +++ b/letta/schemas/providers/openai.py @@ -203,7 +203,7 @@ class OpenAIProvider(Provider): continue else: logger.debug( - f"Skipping embedding models for %s by default, as we don't assume embeddings are supported." + "Skipping embedding models for %s by default, as we don't assume embeddings are supported. " "Please open an issue on GitHub if support is required.", self.base_url, ) @@ -227,7 +227,7 @@ class OpenAIProvider(Provider): return LLM_MAX_TOKENS[model_name] else: logger.debug( - f"Model %s on %s for provider %s not found in LLM_MAX_TOKENS. Using default of {{LLM_MAX_TOKENS['DEFAULT']}}", + "Model %s on %s for provider %s not found in LLM_MAX_TOKENS. Using default of {LLM_MAX_TOKENS['DEFAULT']}", model_name, self.base_url, self.__class__.__name__, diff --git a/letta/schemas/tool.py b/letta/schemas/tool.py index 3e630061..e703291c 100644 --- a/letta/schemas/tool.py +++ b/letta/schemas/tool.py @@ -218,9 +218,9 @@ class ToolCreate(LettaBase): composio_action_schemas = composio_toolset.get_action_schemas(actions=[action_name], check_connected_accounts=False) assert len(composio_action_schemas) > 0, "User supplied parameters do not match any Composio tools" - assert ( - len(composio_action_schemas) == 1 - ), f"User supplied parameters match too many Composio tools; {len(composio_action_schemas)} > 1" + assert len(composio_action_schemas) == 1, ( + f"User supplied parameters match too many Composio tools; {len(composio_action_schemas)} > 1" + ) composio_action_schema = composio_action_schemas[0] diff --git a/letta/schemas/tool_execution_result.py b/letta/schemas/tool_execution_result.py index bca66dbe..fd5bd6b4 100644 --- a/letta/schemas/tool_execution_result.py +++ b/letta/schemas/tool_execution_result.py @@ -6,7 +6,6 @@ from letta.schemas.agent import AgentState class ToolExecutionResult(BaseModel): - status: Literal["success", "error"] = Field(..., description="The status of the tool execution and return object")
func_return: Optional[Any] = Field(None, description="The function return object") agent_state: Optional[AgentState] = Field(None, description="The agent state") diff --git a/letta/serialize_schemas/marshmallow_agent.py b/letta/serialize_schemas/marshmallow_agent.py index 5c543d7a..fe861ba3 100644 --- a/letta/serialize_schemas/marshmallow_agent.py +++ b/letta/serialize_schemas/marshmallow_agent.py @@ -5,8 +5,7 @@ from sqlalchemy import func from sqlalchemy.orm import sessionmaker import letta -from letta.orm import Agent -from letta.orm import Message as MessageModel +from letta.orm import Agent, Message as MessageModel from letta.schemas.agent import AgentState as PydanticAgentState from letta.schemas.user import User from letta.serialize_schemas.marshmallow_agent_environment_variable import SerializedAgentEnvironmentVariableSchema diff --git a/letta/server/rest_api/app.py b/letta/server/rest_api/app.py index b66afe5f..853b8096 100644 --- a/letta/server/rest_api/app.py +++ b/letta/server/rest_api/app.py @@ -261,7 +261,7 @@ def create_application() -> "FastAPI": @app.exception_handler(BedrockPermissionError) async def bedrock_permission_error_handler(request, exc: BedrockPermissionError): - logger.error(f"Bedrock permission denied.") + logger.error("Bedrock permission denied.") if SENTRY_ENABLED: sentry_sdk.capture_exception(exc) @@ -433,10 +433,10 @@ def start_server( if IS_WINDOWS: # Windows doesn't those the fancy unicode characters print(f"Server running at: http://{host or 'localhost'}:{port or REST_DEFAULT_PORT}") - print(f"View using ADE at: https://app.letta.com/development-servers/local/dashboard\n") + print("View using ADE at: https://app.letta.com/development-servers/local/dashboard\n") else: print(f"▶ Server running at: http://{host or 'localhost'}:{port or REST_DEFAULT_PORT}") - print(f"▶ View using ADE at: https://app.letta.com/development-servers/local/dashboard\n") + print("▶ View using ADE at: 
https://app.letta.com/development-servers/local/dashboard\n") if importlib.util.find_spec("granian") is not None and settings.use_granian: # Experimental Granian engine diff --git a/letta/server/rest_api/auth/index.py b/letta/server/rest_api/auth/index.py index 28d22435..6ee6f3cc 100644 --- a/letta/server/rest_api/auth/index.py +++ b/letta/server/rest_api/auth/index.py @@ -22,7 +22,6 @@ class AuthRequest(BaseModel): def setup_auth_router(server: SyncServer, interface: QueuingInterface, password: str) -> APIRouter: - @router.post("/auth", tags=["auth"], response_model=AuthResponse) def authenticate_user(request: AuthRequest) -> AuthResponse: """ diff --git a/letta/server/rest_api/interface.py b/letta/server/rest_api/interface.py index b930f774..84c23d25 100644 --- a/letta/server/rest_api/interface.py +++ b/letta/server/rest_api/interface.py @@ -377,9 +377,9 @@ class StreamingServerInterface(AgentChunkStreamingInterface): ): """Add an item to the deque""" assert self._active, "Generator is inactive" - assert ( - isinstance(item, LettaMessage) or isinstance(item, LegacyLettaMessage) or isinstance(item, MessageStreamStatus) - ), f"Wrong type: {type(item)}" + assert isinstance(item, LettaMessage) or isinstance(item, LegacyLettaMessage) or isinstance(item, MessageStreamStatus), ( + f"Wrong type: {type(item)}" + ) self._chunks.append(item) self._event.set() # Signal that new data is available @@ -731,13 +731,11 @@ class StreamingServerInterface(AgentChunkStreamingInterface): # If we have main_json, we should output a ToolCallMessage elif updates_main_json: - # If there's something in the function_name buffer, we should release it first # NOTE: we could output it as part of a chunk that has both name and args, # however the frontend may expect name first, then args, so to be # safe we'll output name first in a separate chunk if self.function_name_buffer: - # use_assisitant_message means that we should also not release main_json raw, and instead should only release the 
contents of "message": "..." if self.use_assistant_message and self.function_name_buffer == self.assistant_message_tool_name: processed_chunk = None @@ -778,7 +776,6 @@ class StreamingServerInterface(AgentChunkStreamingInterface): # If there was nothing in the name buffer, we can proceed to # output the arguments chunk as a ToolCallMessage else: - # use_assisitant_message means that we should also not release main_json raw, and instead should only release the contents of "message": "..." if self.use_assistant_message and ( self.last_flushed_function_name is not None @@ -860,7 +857,6 @@ class StreamingServerInterface(AgentChunkStreamingInterface): # clear buffers self.function_id_buffer = None else: - # There may be a buffer from a previous chunk, for example # if the previous chunk had arguments but we needed to flush name if self.function_args_buffer: @@ -997,7 +993,6 @@ class StreamingServerInterface(AgentChunkStreamingInterface): # Otherwise, do simple chunks of ToolCallMessage else: - tool_call_delta = {} if tool_call.id: tool_call_delta["id"] = tool_call.id @@ -1073,7 +1068,6 @@ class StreamingServerInterface(AgentChunkStreamingInterface): tool_call = message_delta.tool_calls[0] if tool_call.function: - # Track the function name while streaming # If we were previously on a 'send_message', we need to 'toggle' into 'content' mode if tool_call.function.name: @@ -1154,7 +1148,6 @@ class StreamingServerInterface(AgentChunkStreamingInterface): def internal_monologue(self, msg: str, msg_obj: Optional[Message] = None, chunk_index: Optional[int] = None): """Letta generates some internal monologue""" if not self.streaming_mode: - # create a fake "chunk" of a stream # processed_chunk = { # "internal_monologue": msg, @@ -1268,7 +1261,6 @@ class StreamingServerInterface(AgentChunkStreamingInterface): print(f"Failed to parse function message: {e}") else: - try: func_args = parse_json(function_call.function.arguments) except: diff --git 
a/letta/server/rest_api/redis_stream_manager.py b/letta/server/rest_api/redis_stream_manager.py index 0eb4e519..e3b31dd7 100644 --- a/letta/server/rest_api/redis_stream_manager.py +++ b/letta/server/rest_api/redis_stream_manager.py @@ -140,9 +140,7 @@ class RedisSSEStreamWriter: self.last_flush[run_id] = time.time() - logger.debug( - f"Flushed {len(chunks)} chunks to Redis stream {stream_key}, " f"seq_ids {chunks[0]['seq_id']}-{chunks[-1]['seq_id']}" - ) + logger.debug(f"Flushed {len(chunks)} chunks to Redis stream {stream_key}, seq_ids {chunks[0]['seq_id']}-{chunks[-1]['seq_id']}") if chunks[-1].get("complete") == "true": self._cleanup_run(run_id) diff --git a/letta/server/rest_api/routers/v1/blocks.py b/letta/server/rest_api/routers/v1/blocks.py index 39d57d0f..140e534f 100644 --- a/letta/server/rest_api/routers/v1/blocks.py +++ b/letta/server/rest_api/routers/v1/blocks.py @@ -34,7 +34,7 @@ async def list_blocks( ), label_search: Optional[str] = Query( None, - description=("Search blocks by label. If provided, returns blocks that match this label. " "This is a full-text search on labels."), + description=("Search blocks by label. If provided, returns blocks that match this label. 
This is a full-text search on labels."), ), description_search: Optional[str] = Query( None, diff --git a/letta/server/rest_api/routers/v1/sandbox_configs.py b/letta/server/rest_api/routers/v1/sandbox_configs.py index c72c35ad..f1529ec0 100644 --- a/letta/server/rest_api/routers/v1/sandbox_configs.py +++ b/letta/server/rest_api/routers/v1/sandbox_configs.py @@ -6,11 +6,17 @@ from fastapi import APIRouter, Depends, HTTPException, Query from letta.log import get_logger from letta.schemas.enums import SandboxType -from letta.schemas.environment_variables import SandboxEnvironmentVariable as PydanticEnvVar -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.sandbox_config import LocalSandboxConfig -from letta.schemas.sandbox_config import SandboxConfig as PydanticSandboxConfig -from letta.schemas.sandbox_config import SandboxConfigCreate, SandboxConfigUpdate +from letta.schemas.environment_variables import ( + SandboxEnvironmentVariable as PydanticEnvVar, + SandboxEnvironmentVariableCreate, + SandboxEnvironmentVariableUpdate, +) +from letta.schemas.sandbox_config import ( + LocalSandboxConfig, + SandboxConfig as PydanticSandboxConfig, + SandboxConfigCreate, + SandboxConfigUpdate, +) from letta.server.rest_api.utils import get_letta_server, get_user_id from letta.server.server import SyncServer from letta.services.helpers.tool_execution_helper import create_venv_for_local_sandbox, install_pip_requirements_for_sandbox diff --git a/letta/server/rest_api/routers/v1/tools.py b/letta/server/rest_api/routers/v1/tools.py index 9251c04f..5b9cc0ea 100644 --- a/letta/server/rest_api/routers/v1/tools.py +++ b/letta/server/rest_api/routers/v1/tools.py @@ -749,8 +749,8 @@ async def connect_mcp_server( except ConnectionError: # TODO: jnjpng make this connection error check more specific to the 401 unauthorized error if isinstance(client, AsyncStdioMCPClient): - logger.warning(f"OAuth not supported for 
stdio") - yield oauth_stream_event(OauthStreamEvent.ERROR, message=f"OAuth not supported for stdio") + logger.warning("OAuth not supported for stdio") + yield oauth_stream_event(OauthStreamEvent.ERROR, message="OAuth not supported for stdio") return # Continue to OAuth flow logger.info(f"Attempting OAuth flow for {request}...") diff --git a/letta/server/rest_api/streaming_response.py b/letta/server/rest_api/streaming_response.py index 81029801..642b59a2 100644 --- a/letta/server/rest_api/streaming_response.py +++ b/letta/server/rest_api/streaming_response.py @@ -185,7 +185,7 @@ class StreamingResponseWithStatusCode(StreamingResponse): try: await asyncio.shield(self._protected_stream_response(send)) except asyncio.CancelledError: - logger.info(f"Stream response was cancelled, but shielded task should continue") + logger.info("Stream response was cancelled, but shielded task should continue") except anyio.ClosedResourceError: logger.info("Client disconnected, but shielded task should continue") self._client_connected = False diff --git a/letta/server/rest_api/utils.py b/letta/server/rest_api/utils.py index 6b14ff4a..b5426d99 100644 --- a/letta/server/rest_api/utils.py +++ b/letta/server/rest_api/utils.py @@ -7,8 +7,7 @@ from typing import TYPE_CHECKING, AsyncGenerator, Dict, Iterable, List, Optional from fastapi import Header, HTTPException from openai.types.chat import ChatCompletionMessageParam -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall -from openai.types.chat.chat_completion_message_tool_call import Function as OpenAIFunction +from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction from openai.types.chat.completion_create_params import CompletionCreateParams from pydantic import BaseModel diff --git a/letta/server/server.py b/letta/server/server.py index 1f2a9225..51636802 100644 --- a/letta/server/server.py +++ 
b/letta/server/server.py @@ -30,8 +30,10 @@ from letta.helpers.datetime_helpers import get_utc_time from letta.helpers.json_helpers import json_dumps, json_loads # TODO use custom interface -from letta.interface import AgentInterface # abstract -from letta.interface import CLIInterface # for printing to terminal +from letta.interface import ( + AgentInterface, # abstract + CLIInterface, # for printing to terminal +) from letta.log import get_logger from letta.orm.errors import NoResultFound from letta.otel.tracing import log_event, trace_method diff --git a/letta/services/agent_manager.py b/letta/services/agent_manager.py index cdc3a678..239916df 100644 --- a/letta/services/agent_manager.py +++ b/letta/services/agent_manager.py @@ -26,37 +26,42 @@ from letta.helpers import ToolRulesSolver from letta.helpers.datetime_helpers import get_utc_time from letta.llm_api.llm_client import LLMClient from letta.log import get_logger -from letta.orm import Agent as AgentModel -from letta.orm import AgentsTags, ArchivalPassage -from letta.orm import Block as BlockModel -from letta.orm import BlocksAgents -from letta.orm import Group as GroupModel -from letta.orm import GroupsAgents, IdentitiesAgents -from letta.orm import Source as SourceModel -from letta.orm import SourcePassage, SourcesAgents -from letta.orm import Tool as ToolModel -from letta.orm import ToolsAgents +from letta.orm import ( + Agent as AgentModel, + AgentsTags, + ArchivalPassage, + Block as BlockModel, + BlocksAgents, + Group as GroupModel, + GroupsAgents, + IdentitiesAgents, + Source as SourceModel, + SourcePassage, + SourcesAgents, + Tool as ToolModel, + ToolsAgents, +) from letta.orm.errors import NoResultFound -from letta.orm.sandbox_config import AgentEnvironmentVariable -from letta.orm.sandbox_config import AgentEnvironmentVariable as AgentEnvironmentVariableModel +from letta.orm.sandbox_config import AgentEnvironmentVariable, AgentEnvironmentVariable as AgentEnvironmentVariableModel from 
letta.orm.sqlalchemy_base import AccessType from letta.otel.tracing import trace_method from letta.prompts.prompt_generator import PromptGenerator -from letta.schemas.agent import AgentState as PydanticAgentState -from letta.schemas.agent import AgentType, CreateAgent, InternalTemplateAgentCreate, UpdateAgent, get_prompt_template_for_agent_type -from letta.schemas.block import DEFAULT_BLOCKS -from letta.schemas.block import Block as PydanticBlock -from letta.schemas.block import BlockUpdate +from letta.schemas.agent import ( + AgentState as PydanticAgentState, + AgentType, + CreateAgent, + InternalTemplateAgentCreate, + UpdateAgent, + get_prompt_template_for_agent_type, +) +from letta.schemas.block import DEFAULT_BLOCKS, Block as PydanticBlock, BlockUpdate from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ProviderType, TagMatchMode, ToolType, VectorDBProvider from letta.schemas.file import FileMetadata as PydanticFileMetadata -from letta.schemas.group import Group as PydanticGroup -from letta.schemas.group import ManagerType +from letta.schemas.group import Group as PydanticGroup, ManagerType from letta.schemas.llm_config import LLMConfig from letta.schemas.memory import ContextWindowOverview, Memory -from letta.schemas.message import Message -from letta.schemas.message import Message as PydanticMessage -from letta.schemas.message import MessageCreate, MessageUpdate +from letta.schemas.message import Message, Message as PydanticMessage, MessageCreate, MessageUpdate from letta.schemas.passage import Passage as PydanticPassage from letta.schemas.source import Source as PydanticSource from letta.schemas.tool import Tool as PydanticTool @@ -493,7 +498,6 @@ class AgentManager: # blocks block_ids = list(agent_create.block_ids or []) if agent_create.memory_blocks: - pydantic_blocks = [PydanticBlock(**b.model_dump(to_orm=True)) for b in agent_create.memory_blocks] # Inject a description for the default blocks if the user didn't 
specify them @@ -798,7 +802,6 @@ class AgentManager: agent_update: UpdateAgent, actor: PydanticUser, ) -> PydanticAgentState: - new_tools = set(agent_update.tool_ids or []) new_sources = set(agent_update.source_ids or []) new_blocks = set(agent_update.block_ids or []) @@ -806,7 +809,6 @@ class AgentManager: new_tags = set(agent_update.tags or []) with db_registry.session() as session, session.begin(): - agent: AgentModel = AgentModel.read(db_session=session, identifier=agent_id, actor=actor) agent.updated_at = datetime.now(timezone.utc) agent.last_updated_by_id = actor.id @@ -923,7 +925,6 @@ class AgentManager: agent_update: UpdateAgent, actor: PydanticUser, ) -> PydanticAgentState: - new_tools = set(agent_update.tool_ids or []) new_sources = set(agent_update.source_ids or []) new_blocks = set(agent_update.block_ids or []) @@ -931,7 +932,6 @@ class AgentManager: new_tags = set(agent_update.tags or []) async with db_registry.async_session() as session, session.begin(): - agent: AgentModel = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor) agent.updated_at = datetime.now(timezone.utc) agent.last_updated_by_id = actor.id diff --git a/letta/services/archive_manager.py b/letta/services/archive_manager.py index e547ac07..d006021a 100644 --- a/letta/services/archive_manager.py +++ b/letta/services/archive_manager.py @@ -4,9 +4,7 @@ from sqlalchemy import select from letta.helpers.tpuf_client import should_use_tpuf from letta.log import get_logger -from letta.orm import ArchivalPassage -from letta.orm import Archive as ArchiveModel -from letta.orm import ArchivesAgents +from letta.orm import ArchivalPassage, Archive as ArchiveModel, ArchivesAgents from letta.schemas.archive import Archive as PydanticArchive from letta.schemas.enums import VectorDBProvider from letta.schemas.user import User as PydanticUser diff --git a/letta/services/block_manager.py b/letta/services/block_manager.py index 678e3f57..0635fc42 100644 --- 
a/letta/services/block_manager.py +++ b/letta/services/block_manager.py @@ -13,8 +13,7 @@ from letta.orm.blocks_agents import BlocksAgents from letta.orm.errors import NoResultFound from letta.otel.tracing import trace_method from letta.schemas.agent import AgentState as PydanticAgentState -from letta.schemas.block import Block as PydanticBlock -from letta.schemas.block import BlockUpdate +from letta.schemas.block import Block as PydanticBlock, BlockUpdate from letta.schemas.enums import ActorType from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry diff --git a/letta/services/context_window_calculator/token_counter.py b/letta/services/context_window_calculator/token_counter.py index 52e43244..244a8a52 100644 --- a/letta/services/context_window_calculator/token_counter.py +++ b/letta/services/context_window_calculator/token_counter.py @@ -50,7 +50,8 @@ class AnthropicTokenCounter(TokenCounter): @trace_method @async_redis_cache( - key_func=lambda self, messages: f"anthropic_message_tokens:{self.model}:{hashlib.sha256(json.dumps(messages, sort_keys=True).encode()).hexdigest()[:16]}", + key_func=lambda self, + messages: f"anthropic_message_tokens:{self.model}:{hashlib.sha256(json.dumps(messages, sort_keys=True).encode()).hexdigest()[:16]}", prefix="token_counter", ttl_s=3600, # cache for 1 hour ) @@ -61,7 +62,8 @@ class AnthropicTokenCounter(TokenCounter): @trace_method @async_redis_cache( - key_func=lambda self, tools: f"anthropic_tool_tokens:{self.model}:{hashlib.sha256(json.dumps([t.model_dump() for t in tools], sort_keys=True).encode()).hexdigest()[:16]}", + key_func=lambda self, + tools: f"anthropic_tool_tokens:{self.model}:{hashlib.sha256(json.dumps([t.model_dump() for t in tools], sort_keys=True).encode()).hexdigest()[:16]}", prefix="token_counter", ttl_s=3600, # cache for 1 hour ) @@ -93,7 +95,8 @@ class TiktokenCounter(TokenCounter): @trace_method @async_redis_cache( - key_func=lambda self, messages: 
f"tiktoken_message_tokens:{self.model}:{hashlib.sha256(json.dumps(messages, sort_keys=True).encode()).hexdigest()[:16]}", + key_func=lambda self, + messages: f"tiktoken_message_tokens:{self.model}:{hashlib.sha256(json.dumps(messages, sort_keys=True).encode()).hexdigest()[:16]}", prefix="token_counter", ttl_s=3600, # cache for 1 hour ) @@ -106,7 +109,8 @@ class TiktokenCounter(TokenCounter): @trace_method @async_redis_cache( - key_func=lambda self, tools: f"tiktoken_tool_tokens:{self.model}:{hashlib.sha256(json.dumps([t.model_dump() for t in tools], sort_keys=True).encode()).hexdigest()[:16]}", + key_func=lambda self, + tools: f"tiktoken_tool_tokens:{self.model}:{hashlib.sha256(json.dumps([t.model_dump() for t in tools], sort_keys=True).encode()).hexdigest()[:16]}", prefix="token_counter", ttl_s=3600, # cache for 1 hour ) diff --git a/letta/services/file_manager.py b/letta/services/file_manager.py index b43c3e19..3cac0400 100644 --- a/letta/services/file_manager.py +++ b/letta/services/file_manager.py @@ -12,8 +12,7 @@ from letta.constants import MAX_FILENAME_LENGTH from letta.helpers.pinecone_utils import list_pinecone_index_for_files, should_use_pinecone from letta.log import get_logger from letta.orm.errors import NoResultFound -from letta.orm.file import FileContent as FileContentModel -from letta.orm.file import FileMetadata as FileMetadataModel +from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel from letta.orm.sqlalchemy_base import AccessType from letta.otel.tracing import trace_method from letta.schemas.enums import FileProcessingStatus @@ -60,7 +59,6 @@ class FileManager: *, text: Optional[str] = None, ) -> PydanticFileMetadata: - # short-circuit if it already exists existing = await self.get_file_by_id(file_metadata.id, actor=actor) if existing: diff --git a/letta/services/files_agents_manager.py b/letta/services/files_agents_manager.py index 381fb128..b5213250 100644 --- a/letta/services/files_agents_manager.py 
+++ b/letta/services/files_agents_manager.py @@ -7,10 +7,8 @@ from letta.log import get_logger from letta.orm.errors import NoResultFound from letta.orm.files_agents import FileAgent as FileAgentModel from letta.otel.tracing import trace_method -from letta.schemas.block import Block as PydanticBlock -from letta.schemas.block import FileBlock as PydanticFileBlock -from letta.schemas.file import FileAgent as PydanticFileAgent -from letta.schemas.file import FileMetadata +from letta.schemas.block import Block as PydanticBlock, FileBlock as PydanticFileBlock +from letta.schemas.file import FileAgent as PydanticFileAgent, FileMetadata from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.utils import enforce_types diff --git a/letta/services/group_manager.py b/letta/services/group_manager.py index fb393add..8b2a9b7f 100644 --- a/letta/services/group_manager.py +++ b/letta/services/group_manager.py @@ -8,8 +8,7 @@ from letta.orm.errors import NoResultFound from letta.orm.group import Group as GroupModel from letta.orm.message import Message as MessageModel from letta.otel.tracing import trace_method -from letta.schemas.group import Group as PydanticGroup -from letta.schemas.group import GroupCreate, GroupUpdate, InternalTemplateGroupCreate, ManagerType +from letta.schemas.group import Group as PydanticGroup, GroupCreate, GroupUpdate, InternalTemplateGroupCreate, ManagerType from letta.schemas.letta_message import LettaMessage from letta.schemas.message import Message as PydanticMessage from letta.schemas.user import User as PydanticUser @@ -18,7 +17,6 @@ from letta.utils import enforce_types class GroupManager: - @enforce_types @trace_method async def list_groups_async( diff --git a/letta/services/helpers/agent_manager_helper.py b/letta/services/helpers/agent_manager_helper.py index b302039a..d3880a6c 100644 --- a/letta/services/helpers/agent_manager_helper.py +++ b/letta/services/helpers/agent_manager_helper.py @@ -464,7 
+464,6 @@ def package_initial_message_sequence( # create the agent object init_messages = [] for message_create in initial_message_sequence: - if message_create.role == MessageRole.user: packed_message = system.package_user_message( user_message=message_create.content, @@ -498,8 +497,10 @@ def package_initial_message_sequence( import json import uuid - from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall - from openai.types.chat.chat_completion_message_tool_call import Function as OpenAIFunction + from openai.types.chat.chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall as OpenAIToolCall, + Function as OpenAIFunction, + ) from letta.constants import DEFAULT_MESSAGE_TOOL diff --git a/letta/services/identity_manager.py b/letta/services/identity_manager.py index da04e907..f93c61bb 100644 --- a/letta/services/identity_manager.py +++ b/letta/services/identity_manager.py @@ -9,8 +9,14 @@ from letta.orm.block import Block as BlockModel from letta.orm.errors import UniqueConstraintViolationError from letta.orm.identity import Identity as IdentityModel from letta.otel.tracing import trace_method -from letta.schemas.identity import Identity as PydanticIdentity -from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityType, IdentityUpdate, IdentityUpsert +from letta.schemas.identity import ( + Identity as PydanticIdentity, + IdentityCreate, + IdentityProperty, + IdentityType, + IdentityUpdate, + IdentityUpsert, +) from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.settings import DatabaseChoice, settings @@ -18,7 +24,6 @@ from letta.utils import enforce_types class IdentityManager: - @enforce_types @trace_method async def list_identities_async( diff --git a/letta/services/job_manager.py b/letta/services/job_manager.py index 60afcfd1..c755faab 100644 --- a/letta/services/job_manager.py +++ b/letta/services/job_manager.py @@ 
-13,13 +13,10 @@ from letta.orm.job import Job as JobModel from letta.orm.job_messages import JobMessage from letta.orm.message import Message as MessageModel from letta.orm.sqlalchemy_base import AccessType -from letta.orm.step import Step -from letta.orm.step import Step as StepModel +from letta.orm.step import Step, Step as StepModel from letta.otel.tracing import log_event, trace_method from letta.schemas.enums import JobStatus, JobType, MessageRole -from letta.schemas.job import BatchJob as PydanticBatchJob -from letta.schemas.job import Job as PydanticJob -from letta.schemas.job import JobUpdate, LettaRequestConfig +from letta.schemas.job import BatchJob as PydanticBatchJob, Job as PydanticJob, JobUpdate, LettaRequestConfig from letta.schemas.letta_message import LettaMessage from letta.schemas.message import Message as PydanticMessage from letta.schemas.run import Run as PydanticRun diff --git a/letta/services/llm_batch_manager.py b/letta/services/llm_batch_manager.py index e9fe6bf3..50f560da 100644 --- a/letta/services/llm_batch_manager.py +++ b/letta/services/llm_batch_manager.py @@ -11,9 +11,7 @@ from letta.orm.llm_batch_items import LLMBatchItem from letta.orm.llm_batch_job import LLMBatchJob from letta.otel.tracing import trace_method from letta.schemas.enums import AgentStepStatus, JobStatus, ProviderType -from letta.schemas.llm_batch_job import AgentStepState -from letta.schemas.llm_batch_job import LLMBatchItem as PydanticLLMBatchItem -from letta.schemas.llm_batch_job import LLMBatchJob as PydanticLLMBatchJob +from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem as PydanticLLMBatchItem, LLMBatchJob as PydanticLLMBatchJob from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message as PydanticMessage from letta.schemas.user import User as PydanticUser diff --git a/letta/services/mcp/base_client.py b/letta/services/mcp/base_client.py index 394064bc..95259344 100644 --- a/letta/services/mcp/base_client.py +++ 
b/letta/services/mcp/base_client.py @@ -1,8 +1,7 @@ from contextlib import AsyncExitStack from typing import Optional, Tuple -from mcp import ClientSession -from mcp import Tool as MCPTool +from mcp import ClientSession, Tool as MCPTool from mcp.client.auth import OAuthClientProvider from mcp.types import TextContent diff --git a/letta/services/mcp_manager.py b/letta/services/mcp_manager.py index 4c272d8f..775181c9 100644 --- a/letta/services/mcp_manager.py +++ b/letta/services/mcp_manager.py @@ -33,8 +33,7 @@ from letta.schemas.mcp import ( UpdateStdioMCPServer, UpdateStreamableHTTPMCPServer, ) -from letta.schemas.tool import Tool as PydanticTool -from letta.schemas.tool import ToolCreate +from letta.schemas.tool import Tool as PydanticTool, ToolCreate from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.services.mcp.sse_client import MCP_CONFIG_TOPLEVEL_KEY, AsyncSSEMCPClient @@ -137,8 +136,7 @@ class MCPManager: if mcp_tool.health: if mcp_tool.health.status == "INVALID": raise ValueError( - f"Tool {mcp_tool_name} cannot be attached, JSON schema is invalid." 
- f"Reasons: {', '.join(mcp_tool.health.reasons)}" + f"Tool {mcp_tool_name} cannot be attached, JSON schema is invalid.Reasons: {', '.join(mcp_tool.health.reasons)}" ) tool_create = ToolCreate.from_mcp(mcp_server_name=mcp_server_name, mcp_tool=mcp_tool) @@ -305,7 +303,9 @@ class MCPManager: async with db_registry.async_session() as session: mcp_servers = await MCPServerModel.list_async( - db_session=session, organization_id=actor.organization_id, id=mcp_server_ids # This will use the IN operator + db_session=session, + organization_id=actor.organization_id, + id=mcp_server_ids, # This will use the IN operator ) return [mcp_server.to_pydantic() for mcp_server in mcp_servers] @@ -407,7 +407,6 @@ class MCPManager: # with the value being the schema from StdioServerParameters if MCP_CONFIG_TOPLEVEL_KEY in mcp_config: for server_name, server_params_raw in mcp_config[MCP_CONFIG_TOPLEVEL_KEY].items(): - # No support for duplicate server names if server_name in mcp_server_list: # Duplicate server names are configuration issues, not system errors diff --git a/letta/services/message_manager.py b/letta/services/message_manager.py index 408abba7..4e552548 100644 --- a/letta/services/message_manager.py +++ b/letta/services/message_manager.py @@ -12,8 +12,7 @@ from letta.otel.tracing import trace_method from letta.schemas.enums import MessageRole from letta.schemas.letta_message import LettaMessageUpdateUnion from letta.schemas.letta_message_content import ImageSourceType, LettaImage, MessageContentType -from letta.schemas.message import Message as PydanticMessage -from letta.schemas.message import MessageUpdate +from letta.schemas.message import Message as PydanticMessage, MessageUpdate from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.services.file_manager import FileManager @@ -185,9 +184,9 @@ class MessageManager: # modify the tool call for send_message # TODO: fix this if we add parallel tool calls # TODO: note this only 
works if the AssistantMessage is generated by the standard send_message - assert ( - message.tool_calls[0].function.name == "send_message" - ), f"Expected the first tool call to be send_message, but got {message.tool_calls[0].function.name}" + assert message.tool_calls[0].function.name == "send_message", ( + f"Expected the first tool call to be send_message, but got {message.tool_calls[0].function.name}" + ) original_args = json.loads(message.tool_calls[0].function.arguments) original_args["message"] = letta_message_update.content # override the assistant message update_tool_call = message.tool_calls[0].__deepcopy__() @@ -224,9 +223,9 @@ class MessageManager: # modify the tool call for send_message # TODO: fix this if we add parallel tool calls # TODO: note this only works if the AssistantMessage is generated by the standard send_message - assert ( - message.tool_calls[0].function.name == "send_message" - ), f"Expected the first tool call to be send_message, but got {message.tool_calls[0].function.name}" + assert message.tool_calls[0].function.name == "send_message", ( + f"Expected the first tool call to be send_message, but got {message.tool_calls[0].function.name}" + ) original_args = json.loads(message.tool_calls[0].function.arguments) original_args["message"] = letta_message_update.content # override the assistant message update_tool_call = message.tool_calls[0].__deepcopy__() diff --git a/letta/services/organization_manager.py b/letta/services/organization_manager.py index b7a5abc8..e72defd3 100644 --- a/letta/services/organization_manager.py +++ b/letta/services/organization_manager.py @@ -4,8 +4,7 @@ from letta.constants import DEFAULT_ORG_ID, DEFAULT_ORG_NAME from letta.orm.errors import NoResultFound from letta.orm.organization import Organization as OrganizationModel from letta.otel.tracing import trace_method -from letta.schemas.organization import Organization as PydanticOrganization -from letta.schemas.organization import OrganizationUpdate +from 
letta.schemas.organization import Organization as PydanticOrganization, OrganizationUpdate from letta.server.db import db_registry from letta.utils import enforce_types diff --git a/letta/services/provider_manager.py b/letta/services/provider_manager.py index cfb32a82..57c1cc42 100644 --- a/letta/services/provider_manager.py +++ b/letta/services/provider_manager.py @@ -3,15 +3,13 @@ from typing import List, Optional, Tuple, Union from letta.orm.provider import Provider as ProviderModel from letta.otel.tracing import trace_method from letta.schemas.enums import ProviderCategory, ProviderType -from letta.schemas.providers import Provider as PydanticProvider -from letta.schemas.providers import ProviderCheck, ProviderCreate, ProviderUpdate +from letta.schemas.providers import Provider as PydanticProvider, ProviderCheck, ProviderCreate, ProviderUpdate from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.utils import enforce_types class ProviderManager: - @enforce_types @trace_method def create_provider(self, request: ProviderCreate, actor: PydanticUser) -> PydanticProvider: diff --git a/letta/services/sandbox_config_manager.py b/letta/services/sandbox_config_manager.py index 5289ede9..bb069982 100644 --- a/letta/services/sandbox_config_manager.py +++ b/letta/services/sandbox_config_manager.py @@ -3,15 +3,20 @@ from typing import Dict, List, Optional from letta.constants import LETTA_TOOL_EXECUTION_DIR from letta.log import get_logger from letta.orm.errors import NoResultFound -from letta.orm.sandbox_config import SandboxConfig as SandboxConfigModel -from letta.orm.sandbox_config import SandboxEnvironmentVariable as SandboxEnvVarModel +from letta.orm.sandbox_config import SandboxConfig as SandboxConfigModel, SandboxEnvironmentVariable as SandboxEnvVarModel from letta.otel.tracing import trace_method from letta.schemas.enums import SandboxType -from letta.schemas.environment_variables import SandboxEnvironmentVariable as 
PydanticEnvVar -from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.sandbox_config import LocalSandboxConfig -from letta.schemas.sandbox_config import SandboxConfig as PydanticSandboxConfig -from letta.schemas.sandbox_config import SandboxConfigCreate, SandboxConfigUpdate +from letta.schemas.environment_variables import ( + SandboxEnvironmentVariable as PydanticEnvVar, + SandboxEnvironmentVariableCreate, + SandboxEnvironmentVariableUpdate, +) +from letta.schemas.sandbox_config import ( + LocalSandboxConfig, + SandboxConfig as PydanticSandboxConfig, + SandboxConfigCreate, + SandboxConfigUpdate, +) from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.utils import enforce_types, printd diff --git a/letta/services/source_manager.py b/letta/services/source_manager.py index 28b314b0..38a21437 100644 --- a/letta/services/source_manager.py +++ b/letta/services/source_manager.py @@ -9,8 +9,7 @@ from letta.orm.source import Source as SourceModel from letta.orm.sources_agents import SourcesAgents from letta.otel.tracing import trace_method from letta.schemas.agent import AgentState as PydanticAgentState -from letta.schemas.source import Source as PydanticSource -from letta.schemas.source import SourceUpdate +from letta.schemas.source import Source as PydanticSource, SourceUpdate from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.utils import enforce_types, printd diff --git a/letta/services/step_manager.py b/letta/services/step_manager.py index 5389d7b2..4007bf78 100644 --- a/letta/services/step_manager.py +++ b/letta/services/step_manager.py @@ -29,7 +29,6 @@ class FeedbackType(str, Enum): class StepManager: - @enforce_types @trace_method async def list_steps_async( diff --git a/letta/services/summarizer/summarizer.py b/letta/services/summarizer/summarizer.py index 4a57e6c5..575bf351 100644 
--- a/letta/services/summarizer/summarizer.py +++ b/letta/services/summarizer/summarizer.py @@ -137,7 +137,7 @@ class Summarizer: total_message_count = len(all_in_context_messages) assert self.partial_evict_summarizer_percentage >= 0.0 and self.partial_evict_summarizer_percentage <= 1.0 target_message_start = round((1.0 - self.partial_evict_summarizer_percentage) * total_message_count) - logger.info(f"Target message count: {total_message_count}->{(total_message_count-target_message_start)}") + logger.info(f"Target message count: {total_message_count}->{(total_message_count - target_message_start)}") # The summary message we'll insert is role 'user' (vs 'assistant', 'tool', or 'system') # We are going to put it at index 1 (index 0 is the system message) diff --git a/letta/services/telemetry_manager.py b/letta/services/telemetry_manager.py index bf8efd80..b23d6246 100644 --- a/letta/services/telemetry_manager.py +++ b/letta/services/telemetry_manager.py @@ -2,8 +2,7 @@ from letta.helpers.json_helpers import json_dumps, json_loads from letta.helpers.singleton import singleton from letta.orm.provider_trace import ProviderTrace as ProviderTraceModel from letta.otel.tracing import trace_method -from letta.schemas.provider_trace import ProviderTrace as PydanticProviderTrace -from letta.schemas.provider_trace import ProviderTraceCreate +from letta.schemas.provider_trace import ProviderTrace as PydanticProviderTrace, ProviderTraceCreate from letta.schemas.step import Step as PydanticStep from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry @@ -11,7 +10,6 @@ from letta.utils import enforce_types class TelemetryManager: - @enforce_types @trace_method async def get_provider_trace_by_step_id_async( diff --git a/letta/services/tool_executor/builtin_tool_executor.py b/letta/services/tool_executor/builtin_tool_executor.py index a4146320..144756c1 100644 --- a/letta/services/tool_executor/builtin_tool_executor.py +++ 
b/letta/services/tool_executor/builtin_tool_executor.py @@ -309,7 +309,7 @@ class LettaBuiltinToolExecutor(ToolExecutor): # Create numbered markdown for the LLM to reference numbered_lines = markdown_content.split("\n") - numbered_markdown = "\n".join([f"{i+1:4d}: {line}" for i, line in enumerate(numbered_lines)]) + numbered_markdown = "\n".join([f"{i + 1:4d}: {line}" for i, line in enumerate(numbered_lines)]) # Truncate if too long max_content_length = 200000 diff --git a/letta/services/tool_executor/core_tool_executor.py b/letta/services/tool_executor/core_tool_executor.py index 26e64c1b..bc5438b4 100644 --- a/letta/services/tool_executor/core_tool_executor.py +++ b/letta/services/tool_executor/core_tool_executor.py @@ -273,14 +273,13 @@ class LettaCoreToolExecutor(ToolExecutor): occurences = current_value.count(old_str) if occurences == 0: raise ValueError( - f"No replacement was performed, old_str `{old_str}` did not appear " f"verbatim in memory block with label `{label}`." + f"No replacement was performed, old_str `{old_str}` did not appear verbatim in memory block with label `{label}`." ) elif occurences > 1: content_value_lines = current_value.split("\n") lines = [idx + 1 for idx, line in enumerate(content_value_lines) if old_str in line] raise ValueError( - f"No replacement was performed. Multiple occurrences of " - f"old_str `{old_str}` in lines {lines}. Please ensure it is unique." + f"No replacement was performed. Multiple occurrences of old_str `{old_str}` in lines {lines}. Please ensure it is unique." 
) # Replace old_str with new_str diff --git a/letta/services/tool_executor/files_tool_executor.py b/letta/services/tool_executor/files_tool_executor.py index c3daa9c4..43a3fd97 100644 --- a/letta/services/tool_executor/files_tool_executor.py +++ b/letta/services/tool_executor/files_tool_executor.py @@ -568,7 +568,7 @@ class LettaFileToolExecutor(ToolExecutor): attached_sources = await self.agent_manager.list_attached_sources_async(agent_id=agent_state.id, actor=self.actor) source_ids = [source.id for source in attached_sources] if not source_ids: - return f"No valid source IDs found for attached files" + return "No valid source IDs found for attached files" # Get all attached files for this agent file_agents = await self.files_agents_manager.list_files_for_agent( diff --git a/letta/services/tool_executor/mcp_tool_executor.py b/letta/services/tool_executor/mcp_tool_executor.py index a5f70165..69237cdf 100644 --- a/letta/services/tool_executor/mcp_tool_executor.py +++ b/letta/services/tool_executor/mcp_tool_executor.py @@ -25,7 +25,6 @@ class ExternalMCPToolExecutor(ToolExecutor): sandbox_config: Optional[SandboxConfig] = None, sandbox_env_vars: Optional[Dict[str, Any]] = None, ) -> ToolExecutionResult: - pass mcp_server_tag = [tag for tag in tool.tags if tag.startswith(f"{MCP_TOOL_TAG_NAME_PREFIX}:")] diff --git a/letta/services/tool_executor/sandbox_tool_executor.py b/letta/services/tool_executor/sandbox_tool_executor.py index 462f5ad1..1d105021 100644 --- a/letta/services/tool_executor/sandbox_tool_executor.py +++ b/letta/services/tool_executor/sandbox_tool_executor.py @@ -34,7 +34,6 @@ class SandboxToolExecutor(ToolExecutor): sandbox_config: Optional[SandboxConfig] = None, sandbox_env_vars: Optional[Dict[str, Any]] = None, ) -> ToolExecutionResult: - # Store original memory state if agent_state: orig_memory_str = await agent_state.memory.compile_in_thread_async() diff --git a/letta/services/tool_executor/tool_execution_sandbox.py 
b/letta/services/tool_executor/tool_execution_sandbox.py index a06486b8..bc35618b 100644 --- a/letta/services/tool_executor/tool_execution_sandbox.py +++ b/letta/services/tool_executor/tool_execution_sandbox.py @@ -100,7 +100,7 @@ class ToolExecutionSandbox: logger.debug(f"Executed tool '{self.tool_name}', logging output from tool run: \n") for log_line in (result.stdout or []) + (result.stderr or []): logger.debug(f"{log_line}") - logger.debug(f"Ending output log from tool run.") + logger.debug("Ending output log from tool run.") # Return result return result @@ -267,7 +267,6 @@ class ToolExecutionSandbox: try: with self.temporary_env_vars(env): - # Read and compile the Python script with open(temp_file_path, "r", encoding="utf-8") as f: source = f.read() @@ -475,7 +474,7 @@ class ToolExecutionSandbox: return None, None result = pickle.loads(base64.b64decode(text)) agent_state = None - if not result["agent_state"] is None: + if result["agent_state"] is not None: agent_state = result["agent_state"] return result["results"], agent_state diff --git a/letta/services/tool_manager.py b/letta/services/tool_manager.py index 25596445..97aa6db9 100644 --- a/letta/services/tool_manager.py +++ b/letta/services/tool_manager.py @@ -27,8 +27,7 @@ from letta.orm.errors import NoResultFound from letta.orm.tool import Tool as ToolModel from letta.otel.tracing import trace_method from letta.schemas.enums import ToolType -from letta.schemas.tool import Tool as PydanticTool -from letta.schemas.tool import ToolCreate, ToolUpdate +from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate from letta.schemas.user import User as PydanticUser from letta.server.db import db_registry from letta.services.helpers.agent_manager_helper import calculate_multi_agent_tools diff --git a/letta/services/tool_sandbox/modal_deployment_manager.py b/letta/services/tool_sandbox/modal_deployment_manager.py index f423fb62..a922bfc8 100644 --- 
a/letta/services/tool_sandbox/modal_deployment_manager.py +++ b/letta/services/tool_sandbox/modal_deployment_manager.py @@ -183,9 +183,9 @@ class ModalDeploymentManager: existing_app = await self._try_get_existing_app(sbx_config, version_hash, user) if existing_app: return existing_app, version_hash - raise RuntimeError(f"Deployment completed but app not found") + raise RuntimeError("Deployment completed but app not found") else: - raise RuntimeError(f"Timeout waiting for deployment") + raise RuntimeError("Timeout waiting for deployment") # We're deploying - mark as in progress deployment_key = None diff --git a/letta/services/user_manager.py b/letta/services/user_manager.py index ea391953..bfa73ab0 100644 --- a/letta/services/user_manager.py +++ b/letta/services/user_manager.py @@ -10,8 +10,7 @@ from letta.orm.errors import NoResultFound from letta.orm.organization import Organization as OrganizationModel from letta.orm.user import User as UserModel from letta.otel.tracing import trace_method -from letta.schemas.user import User as PydanticUser -from letta.schemas.user import UserUpdate +from letta.schemas.user import User as PydanticUser, UserUpdate from letta.server.db import db_registry from letta.utils import enforce_types diff --git a/letta/settings.py b/letta/settings.py index 6f9c3531..1cf8a62e 100644 --- a/letta/settings.py +++ b/letta/settings.py @@ -89,7 +89,6 @@ class SummarizerSettings(BaseSettings): class ModelSettings(BaseSettings): - model_config = SettingsConfigDict(env_file=".env", extra="ignore") global_max_context_window_limit: int = 32000 diff --git a/letta/streaming_interface.py b/letta/streaming_interface.py index 7533e25f..83d5e2c6 100644 --- a/letta/streaming_interface.py +++ b/letta/streaming_interface.py @@ -117,9 +117,9 @@ class StreamingCLIInterface(AgentChunkStreamingInterface): # Starting a new buffer line if not self.streaming_buffer_type: - assert not ( - message_delta.content is not None and message_delta.tool_calls is not None and 
len(message_delta.tool_calls) - ), f"Error: got both content and tool_calls in message stream\n{message_delta}" + assert not (message_delta.content is not None and message_delta.tool_calls is not None and len(message_delta.tool_calls)), ( + f"Error: got both content and tool_calls in message stream\n{message_delta}" + ) if message_delta.content is not None: # Write out the prefix for inner thoughts diff --git a/letta/system.py b/letta/system.py index a44f5f7b..f76c836e 100644 --- a/letta/system.py +++ b/letta/system.py @@ -187,7 +187,7 @@ def package_summarize_message(summary, summary_message_count, hidden_message_cou def package_summarize_message_no_counts(summary, timezone): context_message = ( - f"Note: prior messages have been hidden from view due to conversation memory constraints.\n" + "Note: prior messages have been hidden from view due to conversation memory constraints.\n" + f"The following is a summary of the previous messages:\n {summary}" ) diff --git a/letta/utils.py b/letta/utils.py index b3983eab..d5aafc24 100644 --- a/letta/utils.py +++ b/letta/utils.py @@ -1149,7 +1149,6 @@ class CancellationSignal: """ def __init__(self, job_manager=None, job_id=None, actor=None): - from letta.log import get_logger from letta.schemas.user import User from letta.services.job_manager import JobManager diff --git a/mcp_test.py b/mcp_test.py index 507d5c7c..75e30365 100644 --- a/mcp_test.py +++ b/mcp_test.py @@ -83,7 +83,7 @@ class CallbackHandler(BaseHTTPRequestHandler):

Authorization Failed

-

Error: {query_params['error'][0]}

+

Error: {query_params["error"][0]}

You can close this window and return to the terminal.

diff --git a/paper_experiments/doc_qa_task/doc_qa.py b/paper_experiments/doc_qa_task/doc_qa.py index af2cf86a..a999de11 100644 --- a/paper_experiments/doc_qa_task/doc_qa.py +++ b/paper_experiments/doc_qa_task/doc_qa.py @@ -100,7 +100,7 @@ def generate_docqa_baseline_response( # print(f"Top {num_documents} documents: {documents_search_results_sorted_by_relevance}") # compute truncation length - extra_text = BASELINE_PROMPT + f"Question: {question}" + f"Answer:" + extra_text = BASELINE_PROMPT + f"Question: {question}" + "Answer:" padding = count_tokens(extra_text) + 1000 truncation_length = int((config.default_llm_config.context_window - padding) / num_documents) print("Token size", config.default_llm_config.context_window) @@ -114,7 +114,7 @@ def generate_docqa_baseline_response( if i >= num_documents: break - doc_prompt = f"Document [{i+1}]: {doc} \n" + doc_prompt = f"Document [{i + 1}]: {doc} \n" # truncate (that's why the performance goes down as x-axis increases) if truncation_length is not None: diff --git a/paper_experiments/doc_qa_task/llm_judge_doc_qa.py b/paper_experiments/doc_qa_task/llm_judge_doc_qa.py index c6ff6cfe..c7d1d385 100644 --- a/paper_experiments/doc_qa_task/llm_judge_doc_qa.py +++ b/paper_experiments/doc_qa_task/llm_judge_doc_qa.py @@ -124,7 +124,7 @@ if __name__ == "__main__": if a in response: found = True - if not found and not "INSUFFICIENT INFORMATION" in response: + if not found and "INSUFFICIENT INFORMATION" not in response: # inconclusive: pass to llm judge print(question) print(answer) diff --git a/paper_experiments/nested_kv_task/nested_kv.py b/paper_experiments/nested_kv_task/nested_kv.py index 4f439072..2f259227 100644 --- a/paper_experiments/nested_kv_task/nested_kv.py +++ b/paper_experiments/nested_kv_task/nested_kv.py @@ -61,13 +61,13 @@ def archival_memory_text_search(self, query: str, page: Optional[int] = 0) -> Op try: page = int(page) except: - raise ValueError(f"'page' argument must be an integer") + raise 
ValueError("'page' argument must be an integer") count = 10 results = self.persistence_manager.archival_memory.storage.query_text(query, limit=count, offset=page * count) total = len(results) num_pages = math.ceil(total / count) - 1 # 0 index if len(results) == 0: - results_str = f"No results found." + results_str = "No results found." else: results_pref = f"Showing {len(results)} of {total} results (page {page}/{num_pages}):" results_formatted = [f"memory: {d.text}" for d in results] @@ -253,8 +253,8 @@ if __name__ == "__main__": # overwrite kv_d[current_key] = next_key - print(f"Nested {i+1}") - print(f"Done") + print(f"Nested {i + 1}") + print("Done") def get_nested_key(original_key, kv_d): key = original_key diff --git a/pyproject.toml b/pyproject.toml index 53acd84f..062c53f2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -67,6 +67,7 @@ dependencies = [ "certifi>=2025.6.15", "markitdown[docx,pdf,pptx]>=0.1.2", "orjson>=3.11.1", + "ruff[dev]>=0.12.10", ] [project.scripts] @@ -108,11 +109,8 @@ dev = [ "pytest-mock>=3.14.0", "pytest-json-report>=1.5.0", "pexpect>=4.9.0", - "black[jupyter]>=24.4.2", "pre-commit>=3.5.0", "pyright>=1.1.347", - "autoflake>=2.3.0", - "isort>=5.13.2", "ipykernel>=6.29.5", "ipdb>=0.13.13", ] @@ -149,18 +147,46 @@ build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] packages = ["letta"] -[tool.black] -line-length = 140 -target-version = ['py310', 'py311', 'py312', 'py313'] -extend-exclude = "examples/*" -[tool.isort] -profile = "black" -line_length = 140 -multi_line_output = 3 -include_trailing_comma = true -force_grid_wrap = 0 -use_parentheses = true +[tool.ruff] +line-length = 140 +target-version = "py312" +extend-exclude = [ + "examples/*", + "tests/data/*", +] + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort +] +ignore = [ + "E501", # line too long (handled by formatter) + "E402", # module import not at top of file + "E711", # none-comparison 
+ "E712", # true-false-comparison + "E722", # bare except + "E721", # type comparison + "F401", # unused import + "F821", # undefined name + "F811", # redefined while unused + "F841", # local variable assigned but never used + "W293", # blank line contains whitespace +] + +[tool.ruff.lint.isort] +force-single-line = false +combine-as-imports = true +split-on-trailing-comma = true + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" +skip-magic-trailing-comma = false +line-ending = "auto" [tool.pytest.ini_options] asyncio_mode = "auto" diff --git a/tests/helpers/endpoints_helper.py b/tests/helpers/endpoints_helper.py index 72b91b69..bef12971 100644 --- a/tests/helpers/endpoints_helper.py +++ b/tests/helpers/endpoints_helper.py @@ -143,7 +143,7 @@ def assert_invoked_send_message_with_keyword(messages: Sequence[LettaMessage], k # Message field not in send_message if "message" not in arguments: raise InvalidToolCallError( - messages=[target_message], explanation=f"send_message function call does not have required field `message`" + messages=[target_message], explanation="send_message function call does not have required field `message`" ) # Check that the keyword is in the message arguments @@ -151,7 +151,7 @@ def assert_invoked_send_message_with_keyword(messages: Sequence[LettaMessage], k keyword = keyword.lower() arguments["message"] = arguments["message"].lower() - if not keyword in arguments["message"]: + if keyword not in arguments["message"]: raise InvalidToolCallError(messages=[target_message], explanation=f"Message argument did not contain keyword={keyword}") diff --git a/tests/helpers/utils.py b/tests/helpers/utils.py index 2d4036ef..72df6806 100644 --- a/tests/helpers/utils.py +++ b/tests/helpers/utils.py @@ -12,8 +12,7 @@ from letta.schemas.enums import MessageRole from letta.schemas.file import FileAgent from letta.schemas.memory import ContextWindowOverview from letta.schemas.tool import Tool -from letta.schemas.user import User -from 
letta.schemas.user import User as PydanticUser +from letta.schemas.user import User, User as PydanticUser from letta.server.rest_api.routers.v1.agents import ImportedAgentsResponse from letta.server.server import SyncServer @@ -66,7 +65,6 @@ def retry_until_success(max_attempts=10, sleep_time_seconds=4): def decorator_retry(func): @functools.wraps(func) def wrapper(*args, **kwargs): - for attempt in range(1, max_attempts + 1): try: return func(*args, **kwargs) @@ -124,32 +122,32 @@ def comprehensive_agent_checks(agent: AgentState, request: Union[CreateAgent, Up assert agent.llm_config == request.llm_config, f"LLM config mismatch: {agent.llm_config} != {request.llm_config}" # Assert embedding configuration - assert ( - agent.embedding_config == request.embedding_config - ), f"Embedding config mismatch: {agent.embedding_config} != {request.embedding_config}" + assert agent.embedding_config == request.embedding_config, ( + f"Embedding config mismatch: {agent.embedding_config} != {request.embedding_config}" + ) # Assert memory blocks if hasattr(request, "memory_blocks"): - assert len(agent.memory.blocks) == len(request.memory_blocks) + len( - request.block_ids - ), f"Memory blocks count mismatch: {len(agent.memory.blocks)} != {len(request.memory_blocks) + len(request.block_ids)}" + assert len(agent.memory.blocks) == len(request.memory_blocks) + len(request.block_ids), ( + f"Memory blocks count mismatch: {len(agent.memory.blocks)} != {len(request.memory_blocks) + len(request.block_ids)}" + ) memory_block_values = {block.value for block in agent.memory.blocks} expected_block_values = {block.value for block in request.memory_blocks} - assert expected_block_values.issubset( - memory_block_values - ), f"Memory blocks mismatch: {expected_block_values} not in {memory_block_values}" + assert expected_block_values.issubset(memory_block_values), ( + f"Memory blocks mismatch: {expected_block_values} not in {memory_block_values}" + ) # Assert tools assert len(agent.tools) == 
len(request.tool_ids), f"Tools count mismatch: {len(agent.tools)} != {len(request.tool_ids)}" - assert {tool.id for tool in agent.tools} == set( - request.tool_ids - ), f"Tools mismatch: {set(tool.id for tool in agent.tools)} != {set(request.tool_ids)}" + assert {tool.id for tool in agent.tools} == set(request.tool_ids), ( + f"Tools mismatch: {set(tool.id for tool in agent.tools)} != {set(request.tool_ids)}" + ) # Assert sources assert len(agent.sources) == len(request.source_ids), f"Sources count mismatch: {len(agent.sources)} != {len(request.source_ids)}" - assert {source.id for source in agent.sources} == set( - request.source_ids - ), f"Sources mismatch: {set(source.id for source in agent.sources)} != {set(request.source_ids)}" + assert {source.id for source in agent.sources} == set(request.source_ids), ( + f"Sources mismatch: {set(source.id for source in agent.sources)} != {set(request.source_ids)}" + ) # Assert tags assert set(agent.tags) == set(request.tags), f"Tags mismatch: {set(agent.tags)} != {set(request.tags)}" @@ -158,15 +156,15 @@ def comprehensive_agent_checks(agent: AgentState, request: Union[CreateAgent, Up print("TOOLRULES", request.tool_rules) print("AGENTTOOLRULES", agent.tool_rules) if request.tool_rules: - assert len(agent.tool_rules) == len( - request.tool_rules - ), f"Tool rules count mismatch: {len(agent.tool_rules)} != {len(request.tool_rules)}" - assert all( - any(rule.tool_name == req_rule.tool_name for rule in agent.tool_rules) for req_rule in request.tool_rules - ), f"Tool rules mismatch: {agent.tool_rules} != {request.tool_rules}" + assert len(agent.tool_rules) == len(request.tool_rules), ( + f"Tool rules count mismatch: {len(agent.tool_rules)} != {len(request.tool_rules)}" + ) + assert all(any(rule.tool_name == req_rule.tool_name for rule in agent.tool_rules) for req_rule in request.tool_rules), ( + f"Tool rules mismatch: {agent.tool_rules} != {request.tool_rules}" + ) # Assert message_buffer_autoclear - if not 
request.message_buffer_autoclear is None: + if request.message_buffer_autoclear is not None: assert agent.message_buffer_autoclear == request.message_buffer_autoclear @@ -176,9 +174,9 @@ def validate_context_window_overview( """Validate common sense assertions for ContextWindowOverview""" # 1. Current context size should not exceed maximum - assert ( - overview.context_window_size_current <= overview.context_window_size_max - ), f"Current context size ({overview.context_window_size_current}) exceeds maximum ({overview.context_window_size_max})" + assert overview.context_window_size_current <= overview.context_window_size_max, ( + f"Current context size ({overview.context_window_size_current}) exceeds maximum ({overview.context_window_size_max})" + ) # 2. All token counts should be non-negative assert overview.num_tokens_system >= 0, "System token count cannot be negative" @@ -197,14 +195,14 @@ def validate_context_window_overview( + overview.num_tokens_messages + overview.num_tokens_functions_definitions ) - assert ( - overview.context_window_size_current == expected_total - ), f"Token sum ({expected_total}) doesn't match current size ({overview.context_window_size_current})" + assert overview.context_window_size_current == expected_total, ( + f"Token sum ({expected_total}) doesn't match current size ({overview.context_window_size_current})" + ) # 4. Message count should match messages list length - assert ( - len(overview.messages) == overview.num_messages - ), f"Messages list length ({len(overview.messages)}) doesn't match num_messages ({overview.num_messages})" + assert len(overview.messages) == overview.num_messages, ( + f"Messages list length ({len(overview.messages)}) doesn't match num_messages ({overview.num_messages})" + ) # 5. 
If summary_memory is None, its token count should be 0 if overview.summary_memory is None: diff --git a/tests/integration_test_batch_api_cron_jobs.py b/tests/integration_test_batch_api_cron_jobs.py index 95d98bf2..6d6fea28 100644 --- a/tests/integration_test_batch_api_cron_jobs.py +++ b/tests/integration_test_batch_api_cron_jobs.py @@ -141,7 +141,7 @@ def create_test_agent(name, actor, test_id: Optional[str] = None, model="anthrop model_endpoint_type="anthropic", model_endpoint="https://api.anthropic.com/v1", context_window=32000, - handle=f"anthropic/claude-3-7-sonnet-latest", + handle="anthropic/claude-3-7-sonnet-latest", put_inner_thoughts_in_kwargs=True, max_tokens=4096, ) @@ -193,7 +193,7 @@ async def create_test_batch_item(server, batch_id, agent_id, default_user): model_endpoint_type="anthropic", model_endpoint="https://api.anthropic.com/v1", context_window=32000, - handle=f"anthropic/claude-3-7-sonnet-latest", + handle="anthropic/claude-3-7-sonnet-latest", put_inner_thoughts_in_kwargs=True, max_tokens=4096, ) diff --git a/tests/integration_test_builtin_tools.py b/tests/integration_test_builtin_tools.py index 7257d7d2..367de6c3 100644 --- a/tests/integration_test_builtin_tools.py +++ b/tests/integration_test_builtin_tools.py @@ -219,7 +219,7 @@ def test_run_code( returns = [m.tool_return for m in tool_returns] assert any(expected in ret for ret in returns), ( - f"For language={language!r}, expected to find '{expected}' in tool_return, " f"but got {returns!r}" + f"For language={language!r}, expected to find '{expected}' in tool_return, but got {returns!r}" ) @@ -357,7 +357,6 @@ async def test_web_search_uses_agent_env_var_model(): patch.dict(os.environ, {WEB_SEARCH_MODEL_ENV_VAR_NAME: "gpt-4o"}), patch("firecrawl.AsyncFirecrawlApp") as mock_firecrawl_class, ): - # setup mocks mock_model_settings.openai_api_key = "test-key" diff --git a/tests/integration_test_chat_completions.py b/tests/integration_test_chat_completions.py index 1c17b4e4..d16be074 100644 --- 
a/tests/integration_test_chat_completions.py +++ b/tests/integration_test_chat_completions.py @@ -11,8 +11,7 @@ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import MessageStreamStatus from letta.schemas.llm_config import LLMConfig -from letta.schemas.openai.chat_completion_request import ChatCompletionRequest -from letta.schemas.openai.chat_completion_request import UserMessage as OpenAIUserMessage +from letta.schemas.openai.chat_completion_request import ChatCompletionRequest, UserMessage as OpenAIUserMessage from letta.schemas.tool import ToolCreate from letta.schemas.usage import LettaUsageStatistics from letta.services.tool_manager import ToolManager diff --git a/tests/integration_test_send_message.py b/tests/integration_test_send_message.py index 185f88f6..a63c6d43 100644 --- a/tests/integration_test_send_message.py +++ b/tests/integration_test_send_message.py @@ -92,7 +92,7 @@ USER_MESSAGE_FORCE_LONG_REPLY: List[MessageCreate] = [ USER_MESSAGE_GREETING: List[MessageCreate] = [ MessageCreate( role="user", - content=f"Hi!", + content="Hi!", otid=USER_MESSAGE_OTID, ) ] @@ -464,9 +464,9 @@ def validate_google_format_scrubbing(contents: List[Dict[str, Any]]) -> None: args = function_call.get("args", {}) # Assert that there is no 'thinking' field in the function call arguments - assert ( - "thinking" not in args - ), f"Found 'thinking' field in Google model functionCall args (inner thoughts not scrubbed): {args.get('thinking')}" + assert "thinking" not in args, ( + f"Found 'thinking' field in Google model functionCall args (inner thoughts not scrubbed): {args.get('thinking')}" + ) def assert_image_input_response( diff --git a/tests/integration_test_voice_agent.py b/tests/integration_test_voice_agent.py index 4390e978..4026a07d 100644 --- a/tests/integration_test_voice_agent.py +++ b/tests/integration_test_voice_agent.py @@ -20,8 +20,7 @@ from 
letta.schemas.letta_message import AssistantMessage, ReasoningMessage, Tool from letta.schemas.letta_message_content import TextContent from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message, MessageCreate -from letta.schemas.openai.chat_completion_request import ChatCompletionRequest -from letta.schemas.openai.chat_completion_request import UserMessage as OpenAIUserMessage +from letta.schemas.openai.chat_completion_request import ChatCompletionRequest, UserMessage as OpenAIUserMessage from letta.schemas.usage import LettaUsageStatistics from letta.server.server import SyncServer from letta.services.agent_manager import AgentManager diff --git a/tests/mcp_tests/test_mcp.py b/tests/mcp_tests/test_mcp.py index 71eabbe5..54fa786c 100644 --- a/tests/mcp_tests/test_mcp.py +++ b/tests/mcp_tests/test_mcp.py @@ -63,7 +63,7 @@ def create_virtualenv_and_install_requirements(requirements_path: Path, name="ve except subprocess.CalledProcessError as exc: # On failure, try to clean up and recreate once more if not force_recreate: # Avoid infinite recursion - print(f"Initial pip install failed, attempting clean recreation...") + print("Initial pip install failed, attempting clean recreation...") return create_virtualenv_and_install_requirements(requirements_path, name, force_recreate=False) raise RuntimeError(f"pip install failed with exit code {exc.returncode}") diff --git a/tests/mcp_tests/weather/weather.py b/tests/mcp_tests/weather/weather.py index 599b8fd1..8079cd4a 100644 --- a/tests/mcp_tests/weather/weather.py +++ b/tests/mcp_tests/weather/weather.py @@ -27,11 +27,11 @@ def format_alert(feature: dict) -> str: """Format an alert feature into a readable string.""" props = feature["properties"] return f""" -Event: {props.get('event', 'Unknown')} -Area: {props.get('areaDesc', 'Unknown')} -Severity: {props.get('severity', 'Unknown')} -Description: {props.get('description', 'No description available')} -Instructions: {props.get('instruction', 
'No specific instructions provided')} +Event: {props.get("event", "Unknown")} +Area: {props.get("areaDesc", "Unknown")} +Severity: {props.get("severity", "Unknown")} +Description: {props.get("description", "No description available")} +Instructions: {props.get("instruction", "No specific instructions provided")} """ @@ -82,10 +82,10 @@ async def get_forecast(latitude: float, longitude: float) -> str: forecasts = [] for period in periods[:5]: # Only show next 5 periods forecast = f""" -{period['name']}: -Temperature: {period['temperature']}°{period['temperatureUnit']} -Wind: {period['windSpeed']} {period['windDirection']} -Forecast: {period['detailedForecast']} +{period["name"]}: +Temperature: {period["temperature"]}°{period["temperatureUnit"]} +Wind: {period["windSpeed"]} {period["windDirection"]} +Forecast: {period["detailedForecast"]} """ forecasts.append(forecast) diff --git a/tests/test_agent_serialization.py b/tests/test_agent_serialization.py index c5272bcb..90588f0c 100644 --- a/tests/test_agent_serialization.py +++ b/tests/test_agent_serialization.py @@ -350,9 +350,9 @@ def compare_in_context_message_id_remapping(server, og_agent: AgentState, copy_a in_context_messages_copy = server.agent_manager.get_in_context_messages(agent_id=copy_agent.id, actor=copy_user) # 1. Check if the number of messages is the same - assert len(in_context_messages_og) == len( - in_context_messages_copy - ), f"Original message count ({len(in_context_messages_og)}) differs from copy ({len(in_context_messages_copy)})" + assert len(in_context_messages_og) == len(in_context_messages_copy), ( + f"Original message count ({len(in_context_messages_og)}) differs from copy ({len(in_context_messages_copy)})" + ) # 2. 
Iterate and compare messages by order, checking content equality and ID difference if not in_context_messages_og: diff --git a/tests/test_agent_serialization_v2.py b/tests/test_agent_serialization_v2.py index 3a7c004d..40cfb401 100644 --- a/tests/test_agent_serialization_v2.py +++ b/tests/test_agent_serialization_v2.py @@ -979,9 +979,9 @@ class TestAgentFileExport: exported_agent = agent_file.agents[0] for message in exported_agent.messages: - assert ( - message.agent_id == exported_agent.id - ), f"Message {message.id} has agent_id {message.agent_id}, expected {exported_agent.id}" + assert message.agent_id == exported_agent.id, ( + f"Message {message.id} has agent_id {message.agent_id}, expected {exported_agent.id}" + ) assert exported_agent.id == "agent-0" assert exported_agent.id != test_agent.id diff --git a/tests/test_cli.py b/tests/test_cli.py index f1052bd7..3b34cb80 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -71,10 +71,10 @@ def test_letta_run_create_new_agent(swap_letta_config): assert full_output is not None, "No output was captured." # Count occurrences of inner thoughts cloud_emoji_count = full_output.count(INNER_THOUGHTS_CLI_SYMBOL) - assert cloud_emoji_count == 1, f"It appears that there are multiple instances of inner thought outputted." + assert cloud_emoji_count == 1, "It appears that there are multiple instances of inner thought outputted." # Count occurrences of assistant messages robot = full_output.count(ASSISTANT_MESSAGE_CLI_SYMBOL) - assert robot == 1, f"It appears that there are multiple instances of assistant messages outputted." + assert robot == 1, "It appears that there are multiple instances of assistant messages outputted." 
def test_letta_version_prints_only_version(swap_letta_config): diff --git a/tests/test_client.py b/tests/test_client.py index d3de1bc1..caf37cd4 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -675,9 +675,9 @@ def test_timezone(client: Letta): pacific_tz_indicators = {"America/Los_Angeles", "PDT", "PST", "PT", "Pacific Daylight Time", "Pacific Standard Time", "Pacific Time"} content = response.messages[1].content - assert any( - tz in content for tz in pacific_tz_indicators - ), f"Response content: {response.messages[1].content} does not contain expected timezone" + assert any(tz in content for tz in pacific_tz_indicators), ( + f"Response content: {response.messages[1].content} does not contain expected timezone" + ) # test updating the timezone client.agents.modify(agent_id=agent.id, timezone="America/New_York") @@ -686,7 +686,6 @@ def test_timezone(client: Letta): def test_attach_sleeptime_block(client: Letta): - agent = client.agents.create( memory_blocks=[{"label": "human", "value": ""}, {"label": "persona", "value": ""}], model="letta/letta-free", diff --git a/tests/test_google_embeddings.py b/tests/test_google_embeddings.py index dcaad596..5f879933 100644 --- a/tests/test_google_embeddings.py +++ b/tests/test_google_embeddings.py @@ -10,9 +10,7 @@ import threading import time import pytest -from letta_client import CreateBlock -from letta_client import Letta as LettaSDKClient -from letta_client import MessageCreate +from letta_client import CreateBlock, Letta as LettaSDKClient, MessageCreate SERVER_PORT = 8283 @@ -71,7 +69,7 @@ def test_archival_insert_text_embedding_004(client: LettaSDKClient): """ # Create an agent with the specified model and embedding. 
agent = client.agents.create( - name=f"archival_insert_text_embedding_004", + name="archival_insert_text_embedding_004", memory_blocks=[ CreateBlock(label="human", value="name: archival_test"), CreateBlock(label="persona", value="You are a helpful assistant that loves helping out the user"), @@ -98,9 +96,9 @@ def test_archival_insert_text_embedding_004(client: LettaSDKClient): print(archived_messages.messages) # Assert that the archival message is present. - assert any( - message.status == "success" for message in archived_messages.messages if message.message_type == "tool_return_message" - ), f"Archival message '{archival_message}' not found. Archived messages: {archived_messages}" + assert any(message.status == "success" for message in archived_messages.messages if message.message_type == "tool_return_message"), ( + f"Archival message '{archival_message}' not found. Archived messages: {archived_messages}" + ) # Cleanup: Delete the agent. client.agents.delete(agent.id) @@ -119,7 +117,7 @@ def test_archival_insert_embedding_001(client: LettaSDKClient): """ # Create an agent with the specified model and embedding. agent = client.agents.create( - name=f"archival_insert_embedding_001", + name="archival_insert_embedding_001", memory_blocks=[ CreateBlock(label="human", value="name: archival_test"), CreateBlock(label="persona", value="You are a helpful assistant that loves helping out the user"), @@ -144,9 +142,9 @@ def test_archival_insert_embedding_001(client: LettaSDKClient): ) # Assert that the archival message is present. - assert any( - message.status == "success" for message in archived_messages.messages if message.message_type == "tool_return_message" - ), f"Archival message '{archival_message}' not found. Archived messages: {archived_messages}" + assert any(message.status == "success" for message in archived_messages.messages if message.message_type == "tool_return_message"), ( + f"Archival message '{archival_message}' not found. 
Archived messages: {archived_messages}" + ) # Cleanup: Delete the agent. client.agents.delete(agent.id) diff --git a/tests/test_letta_agent_batch.py b/tests/test_letta_agent_batch.py index 8f5241a0..afdb2f79 100644 --- a/tests/test_letta_agent_batch.py +++ b/tests/test_letta_agent_batch.py @@ -379,7 +379,7 @@ async def test_rethink_tool_modify_agent_state(disable_e2b_api_key, server, defa ) agents = [agent] batch_requests = [ - LettaBatchRequest(agent_id=agent.id, messages=[MessageCreate(role="user", content=[TextContent(text=f"Rethink memory.")])]) + LettaBatchRequest(agent_id=agent.id, messages=[MessageCreate(role="user", content=[TextContent(text="Rethink memory.")])]) for agent in agents ] @@ -520,12 +520,12 @@ async def test_partial_error_from_anthropic_batch( print("POST", post_resume_response) print("PRE", pre_resume_response) - assert ( - post_resume_response.letta_batch_id == pre_resume_response.letta_batch_id - ), "resume_step_after_request is expected to have the same letta_batch_id" - assert ( - post_resume_response.last_llm_batch_id != pre_resume_response.last_llm_batch_id - ), "resume_step_after_request is expected to have different llm_batch_id." + assert post_resume_response.letta_batch_id == pre_resume_response.letta_batch_id, ( + "resume_step_after_request is expected to have the same letta_batch_id" + ) + assert post_resume_response.last_llm_batch_id != pre_resume_response.last_llm_batch_id, ( + "resume_step_after_request is expected to have different llm_batch_id." 
+ ) assert post_resume_response.status == JobStatus.running # NOTE: We only expect 2 agents to continue (succeeded ones) assert post_resume_response.agent_count == 2 @@ -542,13 +542,13 @@ async def test_partial_error_from_anthropic_batch( # Confirm that tool_rules_solver state was preserved correctly # Assert every new item's step_state's tool_rules_solver has "get_weather" in the tool_call_history - assert all( - "get_weather" in item.step_state.tool_rules_solver.tool_call_history for item in new_items - ), "Expected 'get_weather' in tool_call_history for all new_items" + assert all("get_weather" in item.step_state.tool_rules_solver.tool_call_history for item in new_items), ( + "Expected 'get_weather' in tool_call_history for all new_items" + ) # Assert that each new item's step_number was incremented to 1 - assert all( - item.step_state.step_number == 1 for item in new_items - ), "Expected step_number to be incremented to 1 for all new_items" + assert all(item.step_state.step_number == 1 for item in new_items), ( + "Expected step_number to be incremented to 1 for all new_items" + ) # Old items must have been flipped to completed / finished earlier # (sanity – we already asserted this above, but we keep it close for clarity) @@ -572,20 +572,20 @@ async def test_partial_error_from_anthropic_batch( assert after == before, f"Agent {agent.id} should not have extra messages persisted due to Anthropic failure" else: assert after - before >= 2, ( - f"Agent {agent.id} should have an assistant tool‑call " f"and tool‑response message persisted." + f"Agent {agent.id} should have an assistant tool‑call and tool‑response message persisted." 
) # Check that agent states have been properly modified to have extended in-context messages for agent in agents: refreshed_agent = server.agent_manager.get_agent_by_id(agent_id=agent.id, actor=default_user) if refreshed_agent.id == agents_failed[0].id: - assert ( - len(refreshed_agent.message_ids) == 4 - ), f"Agent's in-context messages have not been extended, are length: {len(refreshed_agent.message_ids)}" + assert len(refreshed_agent.message_ids) == 4, ( + f"Agent's in-context messages have not been extended, are length: {len(refreshed_agent.message_ids)}" + ) else: - assert ( - len(refreshed_agent.message_ids) == 6 - ), f"Agent's in-context messages have been extended, are length: {len(refreshed_agent.message_ids)}" + assert len(refreshed_agent.message_ids) == 6, ( + f"Agent's in-context messages have been extended, are length: {len(refreshed_agent.message_ids)}" + ) # Check the total list of messages messages = await server.batch_manager.get_messages_for_letta_batch_async( @@ -688,12 +688,12 @@ async def test_resume_step_some_stop( assert len(new_batch_responses) == 1 post_resume_response = new_batch_responses[0] - assert ( - post_resume_response.letta_batch_id == pre_resume_response.letta_batch_id - ), "resume_step_after_request is expected to have the same letta_batch_id" - assert ( - post_resume_response.last_llm_batch_id != pre_resume_response.last_llm_batch_id - ), "resume_step_after_request is expected to have different llm_batch_id." + assert post_resume_response.letta_batch_id == pre_resume_response.letta_batch_id, ( + "resume_step_after_request is expected to have the same letta_batch_id" + ) + assert post_resume_response.last_llm_batch_id != pre_resume_response.last_llm_batch_id, ( + "resume_step_after_request is expected to have different llm_batch_id." 
+ ) assert post_resume_response.status == JobStatus.running # NOTE: We only expect 1 agent to continue assert post_resume_response.agent_count == 1 @@ -710,13 +710,13 @@ async def test_resume_step_some_stop( # Confirm that tool_rules_solver state was preserved correctly # Assert every new item's step_state's tool_rules_solver has "get_weather" in the tool_call_history - assert all( - "get_weather" in item.step_state.tool_rules_solver.tool_call_history for item in new_items - ), "Expected 'get_weather' in tool_call_history for all new_items" + assert all("get_weather" in item.step_state.tool_rules_solver.tool_call_history for item in new_items), ( + "Expected 'get_weather' in tool_call_history for all new_items" + ) # Assert that each new item's step_number was incremented to 1 - assert all( - item.step_state.step_number == 1 for item in new_items - ), "Expected step_number to be incremented to 1 for all new_items" + assert all(item.step_state.step_number == 1 for item in new_items), ( + "Expected step_number to be incremented to 1 for all new_items" + ) # Old items must have been flipped to completed / finished earlier # (sanity – we already asserted this above, but we keep it close for clarity) @@ -730,16 +730,14 @@ async def test_resume_step_some_stop( for agent in agents: before = msg_counts_before[agent.id] # captured just before resume after = await server.message_manager.size_async(actor=default_user, agent_id=agent.id) - assert after - before >= 2, ( - f"Agent {agent.id} should have an assistant tool‑call " f"and tool‑response message persisted." - ) + assert after - before >= 2, f"Agent {agent.id} should have an assistant tool‑call and tool‑response message persisted." 
# Check that agent states have been properly modified to have extended in-context messages for agent in agents: refreshed_agent = server.agent_manager.get_agent_by_id(agent_id=agent.id, actor=default_user) - assert ( - len(refreshed_agent.message_ids) == 6 - ), f"Agent's in-context messages have been extended, are length: {len(refreshed_agent.message_ids)}" + assert len(refreshed_agent.message_ids) == 6, ( + f"Agent's in-context messages have been extended, are length: {len(refreshed_agent.message_ids)}" + ) # Check the total list of messages messages = await server.batch_manager.get_messages_for_letta_batch_async( @@ -770,9 +768,9 @@ def _assert_descending_order(messages): return True for prev, next in zip(messages[:-1], messages[1:]): - assert ( - prev.created_at >= next.created_at - ), f"Order violation: {prev.id} ({prev.created_at}) followed by {next.id} ({next.created_at})" + assert prev.created_at >= next.created_at, ( + f"Order violation: {prev.id} ({prev.created_at}) followed by {next.id} ({next.created_at})" + ) return True @@ -853,12 +851,12 @@ async def test_resume_step_after_request_all_continue( assert len(new_batch_responses) == 1 post_resume_response = new_batch_responses[0] - assert ( - post_resume_response.letta_batch_id == pre_resume_response.letta_batch_id - ), "resume_step_after_request is expected to have the same letta_batch_id" - assert ( - post_resume_response.last_llm_batch_id != pre_resume_response.last_llm_batch_id - ), "resume_step_after_request is expected to have different llm_batch_id." + assert post_resume_response.letta_batch_id == pre_resume_response.letta_batch_id, ( + "resume_step_after_request is expected to have the same letta_batch_id" + ) + assert post_resume_response.last_llm_batch_id != pre_resume_response.last_llm_batch_id, ( + "resume_step_after_request is expected to have different llm_batch_id." 
+ ) assert post_resume_response.status == JobStatus.running assert post_resume_response.agent_count == 3 @@ -872,13 +870,13 @@ async def test_resume_step_after_request_all_continue( # Confirm that tool_rules_solver state was preserved correctly # Assert every new item's step_state's tool_rules_solver has "get_weather" in the tool_call_history - assert all( - "get_weather" in item.step_state.tool_rules_solver.tool_call_history for item in new_items - ), "Expected 'get_weather' in tool_call_history for all new_items" + assert all("get_weather" in item.step_state.tool_rules_solver.tool_call_history for item in new_items), ( + "Expected 'get_weather' in tool_call_history for all new_items" + ) # Assert that each new item's step_number was incremented to 1 - assert all( - item.step_state.step_number == 1 for item in new_items - ), "Expected step_number to be incremented to 1 for all new_items" + assert all(item.step_state.step_number == 1 for item in new_items), ( + "Expected step_number to be incremented to 1 for all new_items" + ) # Old items must have been flipped to completed / finished earlier # (sanity – we already asserted this above, but we keep it close for clarity) @@ -892,16 +890,14 @@ async def test_resume_step_after_request_all_continue( for agent in agents: before = msg_counts_before[agent.id] # captured just before resume after = await server.message_manager.size_async(actor=default_user, agent_id=agent.id) - assert after - before >= 2, ( - f"Agent {agent.id} should have an assistant tool‑call " f"and tool‑response message persisted." - ) + assert after - before >= 2, f"Agent {agent.id} should have an assistant tool‑call and tool‑response message persisted." 
# Check that agent states have been properly modified to have extended in-context messages for agent in agents: refreshed_agent = server.agent_manager.get_agent_by_id(agent_id=agent.id, actor=default_user) - assert ( - len(refreshed_agent.message_ids) == 6 - ), f"Agent's in-context messages have been extended, are length: {len(refreshed_agent.message_ids)}" + assert len(refreshed_agent.message_ids) == 6, ( + f"Agent's in-context messages have been extended, are length: {len(refreshed_agent.message_ids)}" + ) # Check the total list of messages messages = await server.batch_manager.get_messages_for_letta_batch_async( diff --git a/tests/test_managers.py b/tests/test_managers.py index 225e2c01..c79caf93 100644 --- a/tests/test_managers.py +++ b/tests/test_managers.py @@ -16,8 +16,7 @@ import pytest from _pytest.python_api import approx from anthropic.types.beta import BetaMessage from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse, BetaMessageBatchSucceededResult -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall -from openai.types.chat.chat_completion_message_tool_call import Function as OpenAIFunction +from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction from sqlalchemy import func, select from sqlalchemy.exc import IntegrityError, InvalidRequestError from sqlalchemy.orm.exc import StaleDataError @@ -48,11 +47,9 @@ from letta.jobs.types import ItemUpdateInfo, RequestStatusUpdateInfo, StepStatus from letta.orm import Base, Block from letta.orm.block_history import BlockHistory from letta.orm.errors import NoResultFound, UniqueConstraintViolationError -from letta.orm.file import FileContent as FileContentModel -from letta.orm.file import FileMetadata as FileMetadataModel +from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel from letta.schemas.agent import 
CreateAgent, UpdateAgent -from letta.schemas.block import Block as PydanticBlock -from letta.schemas.block import BlockUpdate, CreateBlock +from letta.schemas.block import Block as PydanticBlock, BlockUpdate, CreateBlock from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.enums import ( ActorType, @@ -68,35 +65,25 @@ from letta.schemas.enums import ( ToolType, ) from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate -from letta.schemas.file import FileMetadata -from letta.schemas.file import FileMetadata as PydanticFileMetadata +from letta.schemas.file import FileMetadata, FileMetadata as PydanticFileMetadata from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert -from letta.schemas.job import BatchJob -from letta.schemas.job import Job -from letta.schemas.job import Job as PydanticJob -from letta.schemas.job import JobUpdate, LettaRequestConfig +from letta.schemas.job import BatchJob, Job, Job as PydanticJob, JobUpdate, LettaRequestConfig from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage from letta.schemas.letta_message_content import TextContent from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType from letta.schemas.llm_batch_job import AgentStepState, LLMBatchItem from letta.schemas.llm_config import LLMConfig -from letta.schemas.message import Message as PydanticMessage -from letta.schemas.message import MessageCreate, MessageUpdate +from letta.schemas.message import Message as PydanticMessage, MessageCreate, MessageUpdate from letta.schemas.openai.chat_completion_response import UsageStatistics -from letta.schemas.organization import Organization -from letta.schemas.organization import Organization as PydanticOrganization -from letta.schemas.organization import OrganizationUpdate +from 
letta.schemas.organization import Organization, Organization as PydanticOrganization, OrganizationUpdate from letta.schemas.passage import Passage as PydanticPassage from letta.schemas.pip_requirement import PipRequirement from letta.schemas.run import Run as PydanticRun from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate -from letta.schemas.source import Source as PydanticSource -from letta.schemas.source import SourceUpdate -from letta.schemas.tool import Tool as PydanticTool -from letta.schemas.tool import ToolCreate, ToolUpdate +from letta.schemas.source import Source as PydanticSource, SourceUpdate +from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate from letta.schemas.tool_rule import InitToolRule -from letta.schemas.user import User as PydanticUser -from letta.schemas.user import UserUpdate +from letta.schemas.user import User as PydanticUser, UserUpdate from letta.server.db import db_registry from letta.server.server import SyncServer from letta.services.block_manager import BlockManager @@ -4062,7 +4049,7 @@ async def test_user_caching(server: SyncServer, default_user, performance_pct=0. 
actor_cached = await server.user_manager.get_actor_by_id_async(default_user.id) duration = timer.elapsed_ns durations.append(duration) - print(f"Call {i+2}: {duration:.2e}ns") + print(f"Call {i + 2}: {duration:.2e}ns") assert actor_cached == actor for d in durations: assert d < duration_first * performance_pct @@ -5306,7 +5293,7 @@ async def test_delete_block_detaches_from_agent(server: SyncServer, sarah_agent, # Check that block has been detached too agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user) - assert not (block.id in [b.id for b in agent_state.memory.blocks]) + assert block.id not in [b.id for b in agent_state.memory.blocks] @pytest.mark.asyncio @@ -5836,7 +5823,9 @@ def test_undo_concurrency_stale(server: SyncServer, default_user): # Session1 -> undo to seq=1 block_manager.undo_checkpoint_block( - block_id=block_v1.id, actor=default_user, use_preloaded_block=block_s1 # stale object from session1 + block_id=block_v1.id, + actor=default_user, + use_preloaded_block=block_s1, # stale object from session1 ) # This commits first => block now points to seq=1, version increments @@ -6133,7 +6122,7 @@ async def test_attach_detach_identity_from_agent(server: SyncServer, sarah_agent # Check that block has been detached too agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user) - assert not identity.id in agent_state.identity_ids + assert identity.id not in agent_state.identity_ids @pytest.mark.asyncio @@ -6172,7 +6161,7 @@ async def test_get_set_agents_for_identities(server: SyncServer, sarah_agent, ch assert sarah_agent.id in agent_state_ids assert charles_agent.id in agent_state_ids assert agent_with_identity.id in agent_state_ids - assert not agent_without_identity.id in agent_state_ids + assert agent_without_identity.id not in agent_state_ids # Get the agents for identifier key agent_states = await 
server.agent_manager.list_agents_async(identifier_keys=[identity.identifier_key], actor=default_user) @@ -6183,7 +6172,7 @@ async def test_get_set_agents_for_identities(server: SyncServer, sarah_agent, ch assert sarah_agent.id in agent_state_ids assert charles_agent.id in agent_state_ids assert agent_with_identity.id in agent_state_ids - assert not agent_without_identity.id in agent_state_ids + assert agent_without_identity.id not in agent_state_ids # Delete new agents server.agent_manager.delete_agent(agent_id=agent_with_identity.id, actor=default_user) @@ -6274,7 +6263,7 @@ async def test_get_set_blocks_for_identities(server: SyncServer, default_block, block_ids = [b.id for b in blocks] assert default_block.id in block_ids assert block_with_identity.id in block_ids - assert not block_without_identity.id in block_ids + assert block_without_identity.id not in block_ids # Get the blocks for identifier key blocks = await server.block_manager.get_blocks_async(identifier_keys=[identity.identifier_key], actor=default_user) @@ -6284,7 +6273,7 @@ async def test_get_set_blocks_for_identities(server: SyncServer, default_block, block_ids = [b.id for b in blocks] assert default_block.id in block_ids assert block_with_identity.id in block_ids - assert not block_without_identity.id in block_ids + assert block_without_identity.id not in block_ids # Delete new agents server.block_manager.delete_block(block_id=block_with_identity.id, actor=default_user) @@ -6297,8 +6286,8 @@ async def test_get_set_blocks_for_identities(server: SyncServer, default_block, # Check only initial block in the list block_ids = [b.id for b in blocks] assert default_block.id in block_ids - assert not block_with_identity.id in block_ids - assert not block_without_identity.id in block_ids + assert block_with_identity.id not in block_ids + assert block_without_identity.id not in block_ids await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user) @@ -8490,11 +8479,11 @@ 
def test_get_run_messages(server: SyncServer, default_user: PydanticUser, sarah_ role=MessageRole.tool if i % 2 == 0 else MessageRole.assistant, content=[TextContent(text=f"Test message {i}" if i % 2 == 1 else '{"status": "OK"}')], tool_calls=( - [{"type": "function", "id": f"call_{i//2}", "function": {"name": "custom_tool", "arguments": '{"custom_arg": "test"}'}}] + [{"type": "function", "id": f"call_{i // 2}", "function": {"name": "custom_tool", "arguments": '{"custom_arg": "test"}'}}] if i % 2 == 1 else None ), - tool_call_id=f"call_{i//2}" if i % 2 == 0 else None, + tool_call_id=f"call_{i // 2}" if i % 2 == 0 else None, ) for i in range(4) ] @@ -8540,11 +8529,11 @@ def test_get_run_messages_with_assistant_message(server: SyncServer, default_use role=MessageRole.tool if i % 2 == 0 else MessageRole.assistant, content=[TextContent(text=f"Test message {i}" if i % 2 == 1 else '{"status": "OK"}')], tool_calls=( - [{"type": "function", "id": f"call_{i//2}", "function": {"name": "custom_tool", "arguments": '{"custom_arg": "test"}'}}] + [{"type": "function", "id": f"call_{i // 2}", "function": {"name": "custom_tool", "arguments": '{"custom_arg": "test"}'}}] if i % 2 == 1 else None ), - tool_call_id=f"call_{i//2}" if i % 2 == 0 else None, + tool_call_id=f"call_{i // 2}" if i % 2 == 0 else None, ) for i in range(4) ] @@ -9549,9 +9538,9 @@ async def test_list_batch_items_pagination( llm_batch_id=batch.id, actor=default_user, after=cursor, limit=limit ) # If more than 'limit' items remain, we should only get exactly 'limit' items. - assert len(limited_page) == min( - limit, expected_remaining - ), f"Expected {min(limit, expected_remaining)} items with limit {limit}, got {len(limited_page)}" + assert len(limited_page) == min(limit, expected_remaining), ( + f"Expected {min(limit, expected_remaining)} items with limit {limit}, got {len(limited_page)}" + ) # Optional: Test with a cursor beyond the last item returns an empty list. 
last_cursor = sorted_ids[-1] @@ -9888,9 +9877,7 @@ async def test_get_mcp_servers_by_ids(server, default_user): async def test_mcp_server_deletion_cascades_oauth_sessions(server, default_organization, default_user): """Deleting an MCP server deletes associated OAuth sessions (same user + URL).""" - from letta.schemas.mcp import MCPOAuthSessionCreate - from letta.schemas.mcp import MCPServer as PydanticMCPServer - from letta.schemas.mcp import MCPServerType + from letta.schemas.mcp import MCPOAuthSessionCreate, MCPServer as PydanticMCPServer, MCPServerType test_server_url = "https://test.example.com/mcp" @@ -9932,9 +9919,7 @@ async def test_mcp_server_deletion_cascades_oauth_sessions(server, default_organ async def test_oauth_sessions_with_different_url_persist(server, default_organization, default_user): """Sessions with different URL should not be deleted when deleting the server for another URL.""" - from letta.schemas.mcp import MCPOAuthSessionCreate - from letta.schemas.mcp import MCPServer as PydanticMCPServer - from letta.schemas.mcp import MCPServerType + from letta.schemas.mcp import MCPOAuthSessionCreate, MCPServer as PydanticMCPServer, MCPServerType server_url = "https://test.example.com/mcp" other_url = "https://other.example.com/mcp" @@ -9973,9 +9958,7 @@ async def test_oauth_sessions_with_different_url_persist(server, default_organiz async def test_mcp_server_creation_links_orphaned_sessions(server, default_organization, default_user): """Creating a server should link any existing orphaned sessions (same user + URL).""" - from letta.schemas.mcp import MCPOAuthSessionCreate - from letta.schemas.mcp import MCPServer as PydanticMCPServer - from letta.schemas.mcp import MCPServerType + from letta.schemas.mcp import MCPOAuthSessionCreate, MCPServer as PydanticMCPServer, MCPServerType server_url = "https://test-atomic-create.example.com/mcp" @@ -10019,9 +10002,7 @@ async def test_mcp_server_creation_links_orphaned_sessions(server, default_organ async def 
test_mcp_server_delete_removes_all_sessions_for_url_and_user(server, default_organization, default_user): """Deleting a server removes both linked and orphaned sessions for same user+URL.""" - from letta.schemas.mcp import MCPOAuthSessionCreate - from letta.schemas.mcp import MCPServer as PydanticMCPServer - from letta.schemas.mcp import MCPServerType + from letta.schemas.mcp import MCPOAuthSessionCreate, MCPServer as PydanticMCPServer, MCPServerType server_url = "https://test-atomic-cleanup.example.com/mcp" @@ -10711,9 +10692,9 @@ async def test_lru_eviction_on_attach(server, default_user, sarah_agent, default # Should have closed exactly 2 files (e.g., 7 - 5 = 2 for max_files_open=5) expected_closed_count = len(files) - max_files_open - assert ( - len(all_closed_files) == expected_closed_count - ), f"Should have closed {expected_closed_count} files, but closed: {all_closed_files}" + assert len(all_closed_files) == expected_closed_count, ( + f"Should have closed {expected_closed_count} files, but closed: {all_closed_files}" + ) # Check that the oldest files were closed (first N files attached) expected_closed = [files[i].file_name for i in range(expected_closed_count)] diff --git a/tests/test_sdk_client.py b/tests/test_sdk_client.py index 1ac6fddd..480bf221 100644 --- a/tests/test_sdk_client.py +++ b/tests/test_sdk_client.py @@ -9,9 +9,16 @@ from typing import List, Type import pytest from dotenv import load_dotenv -from letta_client import ContinueToolRule, CreateBlock -from letta_client import Letta as LettaSDKClient -from letta_client import LettaRequest, MaxCountPerStepToolRule, MessageCreate, TerminalToolRule, TextContent +from letta_client import ( + ContinueToolRule, + CreateBlock, + Letta as LettaSDKClient, + LettaRequest, + MaxCountPerStepToolRule, + MessageCreate, + TerminalToolRule, + TextContent, +) from letta_client.client import BaseTool from letta_client.core import ApiError from letta_client.types import AgentState, ToolReturnMessage @@ -1019,7 
+1026,6 @@ def test_pydantic_inventory_management_tool(e2b_sandbox_mode, client: LettaSDKCl @pytest.mark.parametrize("e2b_sandbox_mode", [False], indirect=True) def test_pydantic_task_planning_tool(e2b_sandbox_mode, client: LettaSDKClient): - class Step(BaseModel): name: str = Field(..., description="Name of the step.") description: str = Field(..., description="An exhaustive description of what this step is trying to achieve.") @@ -1642,7 +1648,9 @@ def test_import_agent_file_from_disk( # Now import from the file with open(file_path, "rb") as f: import_result = client.agents.import_file( - file=f, append_copy_suffix=True, override_existing_tools=True # Use suffix to avoid name conflict + file=f, + append_copy_suffix=True, + override_existing_tools=True, # Use suffix to avoid name conflict ) # Basic verification @@ -1716,9 +1724,9 @@ def test_agent_serialization_v2( assert imported_agent.name == name, f"Agent name mismatch: {imported_agent.name} != {name}" # LLM and embedding configs should be preserved - assert ( - imported_agent.llm_config.model == temp_agent.llm_config.model - ), f"LLM model mismatch: {imported_agent.llm_config.model} != {temp_agent.llm_config.model}" + assert imported_agent.llm_config.model == temp_agent.llm_config.model, ( + f"LLM model mismatch: {imported_agent.llm_config.model} != {temp_agent.llm_config.model}" + ) assert imported_agent.embedding_config.embedding_model == temp_agent.embedding_config.embedding_model, "Embedding model mismatch" # System prompt should be preserved @@ -1728,9 +1736,9 @@ def test_agent_serialization_v2( assert set(imported_agent.tags) == set(temp_agent.tags), f"Tags mismatch: {imported_agent.tags} != {temp_agent.tags}" # Agent type should be preserved - assert ( - imported_agent.agent_type == temp_agent.agent_type - ), f"Agent type mismatch: {imported_agent.agent_type} != {temp_agent.agent_type}" + assert imported_agent.agent_type == temp_agent.agent_type, ( + f"Agent type mismatch: {imported_agent.agent_type} != 
{temp_agent.agent_type}" + ) # ========== MEMORY BLOCKS ========== # Compare memory blocks directly from AgentState objects @@ -1757,9 +1765,9 @@ def test_agent_serialization_v2( # Check context block assert "project_context" in imported_blocks_by_label, "Context block missing in imported agent" assert "financial markets" in imported_blocks_by_label["project_context"].value, "Context block content not preserved" - assert ( - imported_blocks_by_label["project_context"].limit == original_blocks_by_label["project_context"].limit - ), "Context block limit mismatch" + assert imported_blocks_by_label["project_context"].limit == original_blocks_by_label["project_context"].limit, ( + "Context block limit mismatch" + ) # ========== TOOLS ========== # Compare tools directly from AgentState objects @@ -1799,9 +1807,9 @@ def test_agent_serialization_v2( imported_user_msgs = [msg for msg in imported_messages if msg.message_type == "user_message" and "Test message" in msg.content] # Should have the same number of test messages - assert len(imported_user_msgs) == len( - original_user_msgs - ), f"User message count mismatch: {len(imported_user_msgs)} != {len(original_user_msgs)}" + assert len(imported_user_msgs) == len(original_user_msgs), ( + f"User message count mismatch: {len(imported_user_msgs)} != {len(original_user_msgs)}" + ) # Verify test message content is preserved if len(original_user_msgs) > 0 and len(imported_user_msgs) > 0: @@ -1903,7 +1911,9 @@ def test_import_agent_with_files_from_disk(client: LettaSDKClient): # Now import from the file with open(file_path, "rb") as f: import_result = client.agents.import_file( - file=f, append_copy_suffix=True, override_existing_tools=True # Use suffix to avoid name conflict + file=f, + append_copy_suffix=True, + override_existing_tools=True, # Use suffix to avoid name conflict ) # Verify import was successful diff --git a/tests/test_server.py b/tests/test_server.py index f8e68df1..48da0d91 100644 --- a/tests/test_server.py +++ 
b/tests/test_server.py @@ -630,7 +630,6 @@ async def test_memory_rebuild_count(server, user, disable_e2b_api_key, base_tool ) def count_system_messages_in_recall() -> Tuple[int, List[LettaMessage]]: - # At this stage, there should only be 1 system message inside of recall storage letta_messages = server.get_agent_recall( user_id=user.id, diff --git a/tests/test_sonnet_nonnative_reasoning_buffering.py b/tests/test_sonnet_nonnative_reasoning_buffering.py index a61254ca..ed7628af 100755 --- a/tests/test_sonnet_nonnative_reasoning_buffering.py +++ b/tests/test_sonnet_nonnative_reasoning_buffering.py @@ -152,7 +152,9 @@ def test_streaming_buffering_behavior(client: Letta, agent_factory, model: str, # Create the stream response_stream = client.agents.messages.create_stream( - agent_id=agent.id, messages=[MessageCreate(role="user", content=user_message)], stream_tokens=True # Enable token streaming + agent_id=agent.id, + messages=[MessageCreate(role="user", content=user_message)], + stream_tokens=True, # Enable token streaming ) # Collect chunks with timestamps @@ -215,7 +217,7 @@ def test_streaming_buffering_behavior(client: Letta, agent_factory, model: str, traceback.print_exc() # Analyze results - print(f"\n=== Analysis ===") + print("\n=== Analysis ===") print(f"Total chunks: {len(chunks_with_time)}") print(f"Reasoning chunks: {len(reasoning_chunks)}") print(f"Assistant chunks: {len(assistant_chunks)}") @@ -227,24 +229,24 @@ def test_streaming_buffering_behavior(client: Letta, agent_factory, model: str, print(f"\nReasoning bursts detected: {len(reasoning_bursts)}") for i, burst in enumerate(reasoning_bursts): burst_times = [reasoning_chunks[idx][0] for idx in burst] - print(f" Burst {i+1}: {len(burst)} chunks from {burst_times[0]:.3f}s to {burst_times[-1]:.3f}s") + print(f" Burst {i + 1}: {len(burst)} chunks from {burst_times[0]:.3f}s to {burst_times[-1]:.3f}s") if assistant_chunks: assistant_bursts = detect_burst_chunks(assistant_chunks) print(f"\nAssistant bursts 
detected: {len(assistant_bursts)}") for i, burst in enumerate(assistant_bursts): burst_times = [assistant_chunks[idx][0] for idx in burst] - print(f" Burst {i+1}: {len(burst)} chunks from {burst_times[0]:.3f}s to {burst_times[-1]:.3f}s") + print(f" Burst {i + 1}: {len(burst)} chunks from {burst_times[0]:.3f}s to {burst_times[-1]:.3f}s") if tool_chunks: tool_bursts = detect_burst_chunks(tool_chunks) print(f"\nTool call bursts detected: {len(tool_bursts)}") for i, burst in enumerate(tool_bursts): burst_times = [tool_chunks[idx][0] for idx in burst] - print(f" Burst {i+1}: {len(burst)} chunks from {burst_times[0]:.3f}s to {burst_times[-1]:.3f}s") + print(f" Burst {i + 1}: {len(burst)} chunks from {burst_times[0]:.3f}s to {burst_times[-1]:.3f}s") # Analyze results based on expected behavior - print(f"\n=== Test Results ===") + print("\n=== Test Results ===") # Check if we detected large bursts has_significant_bursts = False diff --git a/tests/test_sources.py b/tests/test_sources.py index 55bb4434..497405f6 100644 --- a/tests/test_sources.py +++ b/tests/test_sources.py @@ -8,10 +8,7 @@ from datetime import datetime, timedelta import pytest from dotenv import load_dotenv -from letta_client import CreateBlock -from letta_client import Letta as LettaSDKClient -from letta_client import LettaRequest -from letta_client import MessageCreate as ClientMessageCreate +from letta_client import CreateBlock, Letta as LettaSDKClient, LettaRequest, MessageCreate as ClientMessageCreate from letta_client.types import AgentState from letta.constants import DEFAULT_ORG_ID, FILES_TOOLS @@ -453,7 +450,7 @@ def test_agent_uses_open_close_file_correctly(disable_pinecone, client: LettaSDK assert "6: " in new_value, f"Expected line 6 to be present, got: {new_value[:200]}..." 
assert "10: " in new_value, f"Expected line 10 to be present, got: {new_value}" - print(f"Comparing content ranges:") + print("Comparing content ranges:") print(f" First range (offset=0, length=5): '{old_value}'") print(f" Second range (offset=5, length=5): '{new_value}'") @@ -494,7 +491,7 @@ def test_agent_uses_search_files_correctly(disable_pinecone, client: LettaSDKCli agent_id=agent_state.id, messages=[ MessageCreate( - role="user", content=f"Use ONLY the semantic_search_files tool to search for details regarding the electoral history." + role="user", content="Use ONLY the semantic_search_files tool to search for details regarding the electoral history." ) ], ) @@ -538,7 +535,7 @@ def test_agent_uses_grep_correctly_basic(disable_pinecone, client: LettaSDKClien # Ask agent to use the semantic_search_files tool search_files_response = client.agents.messages.create( agent_id=agent_state.id, - messages=[MessageCreate(role="user", content=f"Use ONLY the grep_files tool to search for `Nunzia De Girolamo`.")], + messages=[MessageCreate(role="user", content="Use ONLY the grep_files tool to search for `Nunzia De Girolamo`.")], ) print(f"Grep request sent, got {len(search_files_response.messages)} message(s) in response") print(search_files_response.messages) @@ -581,7 +578,7 @@ def test_agent_uses_grep_correctly_advanced(disable_pinecone, client: LettaSDKCl search_files_response = client.agents.messages.create( agent_id=agent_state.id, messages=[ - MessageCreate(role="user", content=f"Use ONLY the grep_files tool to search for `tool-f5b80b08-5a45-4a0a-b2cd-dd8a0177b7ef`.") + MessageCreate(role="user", content="Use ONLY the grep_files tool to search for `tool-f5b80b08-5a45-4a0a-b2cd-dd8a0177b7ef`.") ], ) print(f"Grep request sent, got {len(search_files_response.messages)} message(s) in response") @@ -742,9 +739,9 @@ def test_duplicate_file_renaming(disable_pinecone, client: LettaSDKClient): for file in files: assert file.original_file_name == "test.txt", f"Expected 
original_file_name='test.txt', got '{file.original_file_name}'" - print(f"✓ Successfully tested duplicate file renaming:") + print("✓ Successfully tested duplicate file renaming:") for i, file in enumerate(files): - print(f" File {i+1}: original='{file.original_file_name}' → renamed='{file.file_name}'") + print(f" File {i + 1}: original='{file.original_file_name}' → renamed='{file.file_name}'") def test_duplicate_file_handling_replace(disable_pinecone, client: LettaSDKClient): @@ -817,9 +814,9 @@ def test_duplicate_file_handling_replace(disable_pinecone, client: LettaSDKClien replacement_block_content = updated_file_blocks[0].value assert replacement_content in replacement_block_content, f"Expected replacement content in block, got: {replacement_block_content}" - assert ( - original_content not in replacement_block_content - ), f"Original content should not be present after replacement: {replacement_block_content}" + assert original_content not in replacement_block_content, ( + f"Original content should not be present after replacement: {replacement_block_content}" + ) print("✓ Successfully tested DuplicateFileHandling.REPLACE functionality") @@ -948,17 +945,17 @@ def test_open_files_schema_descriptions(disable_pinecone, client: LettaSDKClient assert "file_requests" in properties file_requests_prop = properties["file_requests"] expected_file_requests_desc = "List of file open requests, each specifying file name and optional view range." 
- assert ( - file_requests_prop["description"] == expected_file_requests_desc - ), f"Expected file_requests description: '{expected_file_requests_desc}', got: '{file_requests_prop['description']}'" + assert file_requests_prop["description"] == expected_file_requests_desc, ( + f"Expected file_requests description: '{expected_file_requests_desc}', got: '{file_requests_prop['description']}'" + ) # Check close_all_others parameter assert "close_all_others" in properties close_all_others_prop = properties["close_all_others"] expected_close_all_others_desc = "If True, closes all other currently open files first. Defaults to False." - assert ( - close_all_others_prop["description"] == expected_close_all_others_desc - ), f"Expected close_all_others description: '{expected_close_all_others_desc}', got: '{close_all_others_prop['description']}'" + assert close_all_others_prop["description"] == expected_close_all_others_desc, ( + f"Expected close_all_others description: '{expected_close_all_others_desc}', got: '{close_all_others_prop['description']}'" + ) # Check that file_requests is an array type assert file_requests_prop["type"] == "array", f"Expected file_requests type to be 'array', got: '{file_requests_prop['type']}'" @@ -1031,29 +1028,29 @@ def test_grep_files_schema_descriptions(disable_pinecone, client: LettaSDKClient assert "pattern" in properties pattern_prop = properties["pattern"] expected_pattern_desc = "Keyword or regex pattern to search within file contents." 
- assert ( - pattern_prop["description"] == expected_pattern_desc - ), f"Expected pattern description: '{expected_pattern_desc}', got: '{pattern_prop['description']}'" + assert pattern_prop["description"] == expected_pattern_desc, ( + f"Expected pattern description: '{expected_pattern_desc}', got: '{pattern_prop['description']}'" + ) assert pattern_prop["type"] == "string" # Check include parameter assert "include" in properties include_prop = properties["include"] expected_include_desc = "Optional keyword or regex pattern to filter filenames to include in the search." - assert ( - include_prop["description"] == expected_include_desc - ), f"Expected include description: '{expected_include_desc}', got: '{include_prop['description']}'" + assert include_prop["description"] == expected_include_desc, ( + f"Expected include description: '{expected_include_desc}', got: '{include_prop['description']}'" + ) assert include_prop["type"] == "string" # Check context_lines parameter assert "context_lines" in properties context_lines_prop = properties["context_lines"] expected_context_lines_desc = ( - "Number of lines of context to show before and after each match.\n" "Equivalent to `-C` in grep_files. Defaults to 1." + "Number of lines of context to show before and after each match.\nEquivalent to `-C` in grep_files. Defaults to 1." + ) + assert context_lines_prop["description"] == expected_context_lines_desc, ( + f"Expected context_lines description: '{expected_context_lines_desc}', got: '{context_lines_prop['description']}'" ) - assert ( - context_lines_prop["description"] == expected_context_lines_desc - ), f"Expected context_lines description: '{expected_context_lines_desc}', got: '{context_lines_prop['description']}'" assert context_lines_prop["type"] == "integer" # Check offset parameter @@ -1066,9 +1063,9 @@ def test_grep_files_schema_descriptions(disable_pinecone, client: LettaSDKClient "offset=40 for third page, etc. 
The tool will tell you the exact\n" "offset to use for the next page." ) - assert ( - offset_prop["description"] == expected_offset_desc - ), f"Expected offset description: '{expected_offset_desc}', got: '{offset_prop['description']}'" + assert offset_prop["description"] == expected_offset_desc, ( + f"Expected offset description: '{expected_offset_desc}', got: '{offset_prop['description']}'" + ) assert offset_prop["type"] == "integer" # Check return description in main description @@ -1128,9 +1125,9 @@ def test_pinecone_search_files_tool(client: LettaSDKClient): # Check that results contain expected content search_results = tool_returns[0].tool_return print(search_results) - assert ( - "electoral" in search_results.lower() or "history" in search_results.lower() - ), f"Search results should contain relevant content: {search_results}" + assert "electoral" in search_results.lower() or "history" in search_results.lower(), ( + f"Search results should contain relevant content: {search_results}" + ) def test_pinecone_list_files_status(client: LettaSDKClient): @@ -1160,9 +1157,9 @@ def test_pinecone_list_files_status(client: LettaSDKClient): # verify embedding counts for files that have chunks if file_metadata.total_chunks and file_metadata.total_chunks > 0: - assert ( - file_metadata.chunks_embedded == file_metadata.total_chunks - ), f"File {file_metadata.file_name} should have all chunks embedded: {file_metadata.chunks_embedded}/{file_metadata.total_chunks}" + assert file_metadata.chunks_embedded == file_metadata.total_chunks, ( + f"File {file_metadata.file_name} should have all chunks embedded: {file_metadata.chunks_embedded}/{file_metadata.total_chunks}" + ) # cleanup client.sources.delete(source_id=source.id) @@ -1234,9 +1231,9 @@ def test_pinecone_lifecycle_file_and_source_deletion(client: LettaSDKClient): print(f"Found {len(records_after)} records for files after source deletion") - assert ( - len(records_after) == 0 - ), f"All source records should be removed from 
Pinecone after source deletion, but found {len(records_after)}" + assert len(records_after) == 0, ( + f"All source records should be removed from Pinecone after source deletion, but found {len(records_after)}" + ) def test_agent_open_file(disable_pinecone, client: LettaSDKClient, agent_state: AgentState): diff --git a/tests/test_stream_buffer_readers.py b/tests/test_stream_buffer_readers.py index 9a0bb5e8..cee0aeb2 100644 --- a/tests/test_stream_buffer_readers.py +++ b/tests/test_stream_buffer_readers.py @@ -75,12 +75,12 @@ def test_inner_thoughts_in_args_simple(wait_for_first_key): for idx, (fragment, expected) in enumerate(zip(fragments1, expected_updates1)): updates_main_json, updates_inner_thoughts = handler1.process_fragment(fragment) # Assertions - assert ( - updates_main_json == expected["main_json_update"] - ), f"Test Case 1, Fragment {idx+1}: Main JSON update mismatch.\nExpected: '{expected['main_json_update']}'\nGot: '{updates_main_json}'" - assert ( - updates_inner_thoughts == expected["inner_thoughts_update"] - ), f"Test Case 1, Fragment {idx+1}: Inner Thoughts update mismatch.\nExpected: '{expected['inner_thoughts_update']}'\nGot: '{updates_inner_thoughts}'" + assert updates_main_json == expected["main_json_update"], ( + f"Test Case 1, Fragment {idx + 1}: Main JSON update mismatch.\nExpected: '{expected['main_json_update']}'\nGot: '{updates_main_json}'" + ) + assert updates_inner_thoughts == expected["inner_thoughts_update"], ( + f"Test Case 1, Fragment {idx + 1}: Inner Thoughts update mismatch.\nExpected: '{expected['inner_thoughts_update']}'\nGot: '{updates_inner_thoughts}'" + ) @pytest.mark.parametrize("wait_for_first_key", [True, False]) @@ -182,12 +182,12 @@ def test_inner_thoughts_in_args_trailing_quote(wait_for_first_key): for idx, (fragment, expected) in enumerate(zip(fragments1, expected_updates1)): updates_main_json, updates_inner_thoughts = handler1.process_fragment(fragment) # Assertions - assert ( - updates_main_json == 
expected["main_json_update"] - ), f"Test Case 1, Fragment {idx+1}: Main JSON update mismatch.\nFragment: '{fragment}'\nExpected: '{expected['main_json_update']}'\nGot: '{updates_main_json}'\nCurrent JSON: '{current_main_json}'\nCurrent Inner Thoughts: '{current_inner_thoughts}'" - assert ( - updates_inner_thoughts == expected["inner_thoughts_update"] - ), f"Test Case 1, Fragment {idx+1}: Inner Thoughts update mismatch.\nExpected: '{expected['inner_thoughts_update']}'\nGot: '{updates_inner_thoughts}'\nCurrent JSON: '{current_main_json}'\nCurrent Inner Thoughts: '{current_inner_thoughts}'" + assert updates_main_json == expected["main_json_update"], ( + f"Test Case 1, Fragment {idx + 1}: Main JSON update mismatch.\nFragment: '{fragment}'\nExpected: '{expected['main_json_update']}'\nGot: '{updates_main_json}'\nCurrent JSON: '{current_main_json}'\nCurrent Inner Thoughts: '{current_inner_thoughts}'" + ) + assert updates_inner_thoughts == expected["inner_thoughts_update"], ( + f"Test Case 1, Fragment {idx + 1}: Inner Thoughts update mismatch.\nExpected: '{expected['inner_thoughts_update']}'\nGot: '{updates_inner_thoughts}'\nCurrent JSON: '{current_main_json}'\nCurrent Inner Thoughts: '{current_inner_thoughts}'" + ) current_main_json += updates_main_json current_inner_thoughts += updates_inner_thoughts @@ -227,20 +227,20 @@ def test_inner_thoughts_not_in_args(): for idx, (fragment, expected) in enumerate(zip(fragments2, expected_updates2)): updates_main_json, updates_inner_thoughts = handler2.process_fragment(fragment) # Assertions - assert ( - updates_main_json == expected["main_json_update"] - ), f"Test Case 2, Fragment {idx+1}: Main JSON update mismatch.\nExpected: '{expected['main_json_update']}'\nGot: '{updates_main_json}'" - assert ( - updates_inner_thoughts == expected["inner_thoughts_update"] - ), f"Test Case 2, Fragment {idx+1}: Inner Thoughts update mismatch.\nExpected: '{expected['inner_thoughts_update']}'\nGot: '{updates_inner_thoughts}'" + assert 
updates_main_json == expected["main_json_update"], ( + f"Test Case 2, Fragment {idx + 1}: Main JSON update mismatch.\nExpected: '{expected['main_json_update']}'\nGot: '{updates_main_json}'" + ) + assert updates_inner_thoughts == expected["inner_thoughts_update"], ( + f"Test Case 2, Fragment {idx + 1}: Inner Thoughts update mismatch.\nExpected: '{expected['inner_thoughts_update']}'\nGot: '{updates_inner_thoughts}'" + ) # Final assertions for Test Case 2 expected_final_main_json2 = '{"message":"Here we are again, with \'x2\'! 🎉 Let\'s take this chance: If you could swap lives with any fictional character for a day, who would it be?"}' expected_final_inner_thoughts2 = "" - assert ( - handler2.main_json == expected_final_main_json2 - ), f"Test Case 2: Final main_json mismatch.\nExpected: '{expected_final_main_json2}'\nGot: '{handler2.main_json}'" - assert ( - handler2.inner_thoughts == expected_final_inner_thoughts2 - ), f"Test Case 2: Final inner_thoughts mismatch.\nExpected: '{expected_final_inner_thoughts2}'\nGot: '{handler2.inner_thoughts}'" + assert handler2.main_json == expected_final_main_json2, ( + f"Test Case 2: Final main_json mismatch.\nExpected: '{expected_final_main_json2}'\nGot: '{handler2.main_json}'" + ) + assert handler2.inner_thoughts == expected_final_inner_thoughts2, ( + f"Test Case 2: Final inner_thoughts mismatch.\nExpected: '{expected_final_inner_thoughts2}'\nGot: '{handler2.inner_thoughts}'" + ) diff --git a/tests/test_timezone_formatting.py b/tests/test_timezone_formatting.py index 878799a1..b142b92a 100644 --- a/tests/test_timezone_formatting.py +++ b/tests/test_timezone_formatting.py @@ -68,9 +68,9 @@ class TestTimezoneFormatting: # Check that times are within tolerance time_diff = abs((parsed_time - current_time_in_tz).total_seconds()) - assert ( - time_diff <= tolerance_minutes * 60 - ), f"Time difference too large: {time_diff}s. 
Parsed: {parsed_time}, Expected timezone: {current_time_in_tz}" + assert time_diff <= tolerance_minutes * 60, ( + f"Time difference too large: {time_diff}s. Parsed: {parsed_time}, Expected timezone: {current_time_in_tz}" + ) # Verify timezone info exists and format looks reasonable assert parsed_time.tzinfo is not None, "Parsed time should have timezone info" diff --git a/tests/test_tool_rule_solver.py b/tests/test_tool_rule_solver.py index 54077563..932cb1ea 100644 --- a/tests/test_tool_rule_solver.py +++ b/tests/test_tool_rule_solver.py @@ -104,12 +104,12 @@ def test_conditional_tool_rule(): assert solver.get_allowed_tool_names({START_TOOL}) == [START_TOOL], "Initial allowed tool should be 'start_tool'" solver.register_tool_call(START_TOOL) - assert solver.get_allowed_tool_names({END_TOOL}, last_function_response='{"message": "true"}') == [ - END_TOOL - ], "After 'start_tool' returns true, should allow 'end_tool'" - assert solver.get_allowed_tool_names({START_TOOL}, last_function_response='{"message": "false"}') == [ - START_TOOL - ], "After 'start_tool' returns false, should allow 'start_tool'" + assert solver.get_allowed_tool_names({END_TOOL}, last_function_response='{"message": "true"}') == [END_TOOL], ( + "After 'start_tool' returns true, should allow 'end_tool'" + ) + assert solver.get_allowed_tool_names({START_TOOL}, last_function_response='{"message": "false"}') == [START_TOOL], ( + "After 'start_tool' returns false, should allow 'start_tool'" + ) assert solver.is_terminal_tool(END_TOOL) is True, "Should recognize 'end_tool' as terminal" @@ -148,9 +148,9 @@ def test_max_count_per_step_tool_rule(): assert solver.get_allowed_tool_names({START_TOOL}) == [START_TOOL], "After first use, should still allow 'start_tool'" solver.register_tool_call(START_TOOL) - assert ( - solver.get_allowed_tool_names({START_TOOL}, error_on_empty=False) == [] - ), "After reaching max count, 'start_tool' should no longer be allowed" + assert 
solver.get_allowed_tool_names({START_TOOL}, error_on_empty=False) == [], ( + "After reaching max count, 'start_tool' should no longer be allowed" + ) def test_max_count_per_step_tool_rule_allows_usage_up_to_limit(): @@ -179,9 +179,9 @@ def test_max_count_per_step_tool_rule_does_not_affect_other_tools(): solver.register_tool_call(START_TOOL) solver.register_tool_call(START_TOOL) - assert sorted(solver.get_allowed_tool_names({START_TOOL, NEXT_TOOL, HELPER_TOOL})) == sorted( - [NEXT_TOOL, HELPER_TOOL] - ), "Other tools should still be allowed even if 'start_tool' is over limit" + assert sorted(solver.get_allowed_tool_names({START_TOOL, NEXT_TOOL, HELPER_TOOL})) == sorted([NEXT_TOOL, HELPER_TOOL]), ( + "Other tools should still be allowed even if 'start_tool' is over limit" + ) def test_max_count_per_step_tool_rule_resets_on_clear(): @@ -507,31 +507,31 @@ def test_required_before_exit_tool_rule_multiple_required_tools(): required_rule_2 = RequiredBeforeExitToolRule(tool_name=REQUIRED_TOOL_2) solver = ToolRulesSolver(tool_rules=[required_rule_1, required_rule_2]) - assert ( - solver.has_required_tools_been_called({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) is False - ), "Should return False when no required tools have been called" + assert solver.has_required_tools_been_called({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) is False, ( + "Should return False when no required tools have been called" + ) uncalled_tools = solver.get_uncalled_required_tools({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) assert set(uncalled_tools) == {REQUIRED_TOOL_1, REQUIRED_TOOL_2}, "Should return both uncalled required tools" # Call first required tool solver.register_tool_call(REQUIRED_TOOL_1) - assert ( - solver.has_required_tools_been_called({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) is False - ), "Should return False when only one required tool has been called" - assert solver.get_uncalled_required_tools({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) == [ - REQUIRED_TOOL_2 - ], "Should return remaining uncalled required tool" + 
assert solver.has_required_tools_been_called({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) is False, ( + "Should return False when only one required tool has been called" + ) + assert solver.get_uncalled_required_tools({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) == [REQUIRED_TOOL_2], ( + "Should return remaining uncalled required tool" + ) # Call second required tool solver.register_tool_call(REQUIRED_TOOL_2) - assert ( - solver.has_required_tools_been_called({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) is True - ), "Should return True when all required tools have been called" - assert ( - solver.get_uncalled_required_tools({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) == [] - ), "Should return empty list when all required tools have been called" + assert solver.has_required_tools_been_called({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) is True, ( + "Should return True when all required tools have been called" + ) + assert solver.get_uncalled_required_tools({REQUIRED_TOOL_1, REQUIRED_TOOL_2}) == [], ( + "Should return empty list when all required tools have been called" + ) def test_required_before_exit_tool_rule_mixed_with_other_tools(): diff --git a/tests/test_tool_sandbox/restaurant_management_system/adjust_menu_prices.py b/tests/test_tool_sandbox/restaurant_management_system/adjust_menu_prices.py index 1d3d9d3e..ffe734b3 100644 --- a/tests/test_tool_sandbox/restaurant_management_system/adjust_menu_prices.py +++ b/tests/test_tool_sandbox/restaurant_management_system/adjust_menu_prices.py @@ -8,10 +8,9 @@ def adjust_menu_prices(percentage: float) -> str: str: A formatted string summarizing the price adjustments. 
""" import cowsay - from tqdm import tqdm - from core.menu import Menu, MenuItem # Import a class from the codebase from core.utils import format_currency # Use a utility function to test imports + from tqdm import tqdm if not isinstance(percentage, (int, float)): raise TypeError("percentage must be a number") diff --git a/tests/test_tool_sandbox/restaurant_management_system/test.py b/tests/test_tool_sandbox/restaurant_management_system/test.py index feefcc83..16c52a07 100644 --- a/tests/test_tool_sandbox/restaurant_management_system/test.py +++ b/tests/test_tool_sandbox/restaurant_management_system/test.py @@ -11,9 +11,9 @@ def generate_and_execute_tool(tool_name: str, args: dict): with open(script_path, "w") as script_file: script_file.write(f"from restaurant_management_system.tools.{tool_name} import {tool_name}\n\n") arg_str = ", ".join([f"{key}={repr(value)}" for key, value in args.items()]) - script_file.write(f"if __name__ == '__main__':\n") + script_file.write("if __name__ == '__main__':\n") script_file.write(f" result = {tool_name}({arg_str})\n") - script_file.write(f" print(result)\n") + script_file.write(" print(result)\n") # Execute the script runpy.run_path(script_path, run_name="__main__") diff --git a/uv.lock b/uv.lock index 6a814594..a405d816 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.11, <3.14" resolution-markers = [ "python_full_version >= '3.13'", @@ -337,18 +337,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, ] -[[package]] -name = "autoflake" -version = "2.3.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyflakes" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/2a/cb/486f912d6171bc5748c311a2984a301f4e2d054833a1da78485866c71522/autoflake-2.3.1.tar.gz", hash = "sha256:c98b75dc5b0a86459c4f01a1d32ac7eb4338ec4317a4469515ff1e687ecd909e", size = 27642, upload-time = "2024-03-13T03:41:28.977Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a2/ee/3fd29bf416eb4f1c5579cf12bf393ae954099258abd7bde03c4f9716ef6b/autoflake-2.3.1-py3-none-any.whl", hash = "sha256:3ae7495db9084b7b32818b4140e6dc4fc280b712fb414f5b8fe57b0a8e85a840", size = 32483, upload-time = "2024-03-13T03:41:26.969Z" }, -] - [[package]] name = "banks" version = "2.2.0" @@ -2382,6 +2370,7 @@ dependencies = [ { name = "pyyaml" }, { name = "questionary" }, { name = "rich" }, + { name = "ruff" }, { name = "sentry-sdk", extra = ["fastapi"] }, { name = "setuptools" }, { name = "sqlalchemy", extra = ["asyncio"] }, @@ -2416,11 +2405,8 @@ desktop = [ { name = "wikipedia" }, ] dev = [ - { name = "autoflake" }, - { name = "black", extra = ["jupyter"] }, { name = "ipdb" }, { name = "ipykernel" }, - { name = "isort" }, { name = "pexpect" }, { name = "pre-commit" }, { name = "pyright" }, @@ -2482,9 +2468,7 @@ requires-dist = [ { name = "anthropic", specifier = ">=0.49.0" }, { name = "apscheduler", specifier = ">=3.11.0" }, { name = "asyncpg", marker = "extra == 'postgres'", specifier = ">=0.30.0" }, - { name = "autoflake", marker = "extra == 'dev'", specifier = ">=2.3.0" }, { name = "black", extras = ["jupyter"], specifier = ">=24.2.0" }, - { name = "black", extras = ["jupyter"], marker = "extra == 'dev'", specifier = ">=24.4.2" }, { name = "boto3", marker = "extra == 'bedrock'", specifier = ">=1.36.24" }, { name = "brotli", specifier = ">=1.1.0" }, { name = "certifi", specifier = ">=2025.6.15" }, @@ -2511,7 +2495,6 @@ requires-dist = [ { name = "httpx-sse", specifier = ">=0.4.0" }, { name = "ipdb", marker = "extra == 'dev'", specifier = ">=0.13.13" }, { name = "ipykernel", marker = "extra == 'dev'", specifier = ">=6.29.5" }, - 
{ name = "isort", marker = "extra == 'dev'", specifier = ">=5.13.2" }, { name = "jinja2", specifier = ">=3.1.5" }, { name = "langchain", marker = "extra == 'desktop'", specifier = ">=0.3.7" }, { name = "langchain", marker = "extra == 'external-tools'", specifier = ">=0.3.7" }, @@ -2562,6 +2545,7 @@ requires-dist = [ { name = "questionary", specifier = ">=2.0.1" }, { name = "redis", marker = "extra == 'redis'", specifier = ">=6.2.0" }, { name = "rich", specifier = ">=13.9.4" }, + { name = "ruff", extras = ["dev"], specifier = ">=0.12.10" }, { name = "sentry-sdk", extras = ["fastapi"], specifier = "==2.19.1" }, { name = "setuptools", specifier = ">=70" }, { name = "sqlalchemy", extras = ["asyncio"], specifier = ">=2.0.41" }, @@ -4443,15 +4427,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/58/f0/427018098906416f580e3cf1366d3b1abfb408a0652e9f31600c24a1903c/pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796", size = 45235, upload-time = "2025-06-24T13:26:45.485Z" }, ] -[[package]] -name = "pyflakes" -version = "3.4.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/45/dc/fd034dc20b4b264b3d015808458391acbf9df40b1e54750ef175d39180b1/pyflakes-3.4.0.tar.gz", hash = "sha256:b24f96fafb7d2ab0ec5075b7350b3d2d2218eab42003821c06344973d3ea2f58", size = 64669, upload-time = "2025-06-20T18:45:27.834Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c2/2f/81d580a0fb83baeb066698975cb14a618bdbed7720678566f1b046a95fe8/pyflakes-3.4.0-py2.py3-none-any.whl", hash = "sha256:f742a7dbd0d9cb9ea41e9a24a918996e8170c799fa528688d40dd582c8265f4f", size = 63551, upload-time = "2025-06-20T18:45:26.937Z" }, -] - [[package]] name = "pygments" version = "2.19.2" @@ -5044,6 +5019,32 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", 
hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, ] +[[package]] +name = "ruff" +version = "0.12.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/eb/8c073deb376e46ae767f4961390d17545e8535921d2f65101720ed8bd434/ruff-0.12.10.tar.gz", hash = "sha256:189ab65149d11ea69a2d775343adf5f49bb2426fc4780f65ee33b423ad2e47f9", size = 5310076, upload-time = "2025-08-21T18:23:22.595Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/e7/560d049d15585d6c201f9eeacd2fd130def3741323e5ccf123786e0e3c95/ruff-0.12.10-py3-none-linux_armv6l.whl", hash = "sha256:8b593cb0fb55cc8692dac7b06deb29afda78c721c7ccfed22db941201b7b8f7b", size = 11935161, upload-time = "2025-08-21T18:22:26.965Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b0/ad2464922a1113c365d12b8f80ed70fcfb39764288ac77c995156080488d/ruff-0.12.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ebb7333a45d56efc7c110a46a69a1b32365d5c5161e7244aaf3aa20ce62399c1", size = 12660884, upload-time = "2025-08-21T18:22:30.925Z" }, + { url = "https://files.pythonhosted.org/packages/d7/f1/97f509b4108d7bae16c48389f54f005b62ce86712120fd8b2d8e88a7cb49/ruff-0.12.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d59e58586829f8e4a9920788f6efba97a13d1fa320b047814e8afede381c6839", size = 11872754, upload-time = "2025-08-21T18:22:34.035Z" }, + { url = "https://files.pythonhosted.org/packages/12/ad/44f606d243f744a75adc432275217296095101f83f966842063d78eee2d3/ruff-0.12.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:822d9677b560f1fdeab69b89d1f444bf5459da4aa04e06e766cf0121771ab844", size = 12092276, upload-time = "2025-08-21T18:22:36.764Z" }, + { url = "https://files.pythonhosted.org/packages/06/1f/ed6c265e199568010197909b25c896d66e4ef2c5e1c3808caf461f6f3579/ruff-0.12.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:37b4a64f4062a50c75019c61c7017ff598cb444984b638511f48539d3a1c98db", size = 11734700, upload-time = "2025-08-21T18:22:39.822Z" }, + { url = "https://files.pythonhosted.org/packages/63/c5/b21cde720f54a1d1db71538c0bc9b73dee4b563a7dd7d2e404914904d7f5/ruff-0.12.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c6f4064c69d2542029b2a61d39920c85240c39837599d7f2e32e80d36401d6e", size = 13468783, upload-time = "2025-08-21T18:22:42.559Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/39369e6ac7f2a1848f22fb0b00b690492f20811a1ac5c1fd1d2798329263/ruff-0.12.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:059e863ea3a9ade41407ad71c1de2badfbe01539117f38f763ba42a1206f7559", size = 14436642, upload-time = "2025-08-21T18:22:45.612Z" }, + { url = "https://files.pythonhosted.org/packages/e3/03/5da8cad4b0d5242a936eb203b58318016db44f5c5d351b07e3f5e211bb89/ruff-0.12.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bef6161e297c68908b7218fa6e0e93e99a286e5ed9653d4be71e687dff101cf", size = 13859107, upload-time = "2025-08-21T18:22:48.886Z" }, + { url = "https://files.pythonhosted.org/packages/19/19/dd7273b69bf7f93a070c9cec9494a94048325ad18fdcf50114f07e6bf417/ruff-0.12.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4f1345fbf8fb0531cd722285b5f15af49b2932742fc96b633e883da8d841896b", size = 12886521, upload-time = "2025-08-21T18:22:51.567Z" }, + { url = "https://files.pythonhosted.org/packages/c0/1d/b4207ec35e7babaee62c462769e77457e26eb853fbdc877af29417033333/ruff-0.12.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f68433c4fbc63efbfa3ba5db31727db229fa4e61000f452c540474b03de52a9", size = 13097528, upload-time = "2025-08-21T18:22:54.609Z" }, + { url = "https://files.pythonhosted.org/packages/ff/00/58f7b873b21114456e880b75176af3490d7a2836033779ca42f50de3b47a/ruff-0.12.10-py3-none-manylinux_2_31_riscv64.whl", hash = 
"sha256:141ce3d88803c625257b8a6debf4a0473eb6eed9643a6189b68838b43e78165a", size = 13080443, upload-time = "2025-08-21T18:22:57.413Z" }, + { url = "https://files.pythonhosted.org/packages/12/8c/9e6660007fb10189ccb78a02b41691288038e51e4788bf49b0a60f740604/ruff-0.12.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f3fc21178cd44c98142ae7590f42ddcb587b8e09a3b849cbc84edb62ee95de60", size = 11896759, upload-time = "2025-08-21T18:23:00.473Z" }, + { url = "https://files.pythonhosted.org/packages/67/4c/6d092bb99ea9ea6ebda817a0e7ad886f42a58b4501a7e27cd97371d0ba54/ruff-0.12.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7d1a4e0bdfafcd2e3e235ecf50bf0176f74dd37902f241588ae1f6c827a36c56", size = 11701463, upload-time = "2025-08-21T18:23:03.211Z" }, + { url = "https://files.pythonhosted.org/packages/59/80/d982c55e91df981f3ab62559371380616c57ffd0172d96850280c2b04fa8/ruff-0.12.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:e67d96827854f50b9e3e8327b031647e7bcc090dbe7bb11101a81a3a2cbf1cc9", size = 12691603, upload-time = "2025-08-21T18:23:06.935Z" }, + { url = "https://files.pythonhosted.org/packages/ad/37/63a9c788bbe0b0850611669ec6b8589838faf2f4f959647f2d3e320383ae/ruff-0.12.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ae479e1a18b439c59138f066ae79cc0f3ee250712a873d00dbafadaad9481e5b", size = 13164356, upload-time = "2025-08-21T18:23:10.225Z" }, + { url = "https://files.pythonhosted.org/packages/47/d4/1aaa7fb201a74181989970ebccd12f88c0fc074777027e2a21de5a90657e/ruff-0.12.10-py3-none-win32.whl", hash = "sha256:9de785e95dc2f09846c5e6e1d3a3d32ecd0b283a979898ad427a9be7be22b266", size = 11896089, upload-time = "2025-08-21T18:23:14.232Z" }, + { url = "https://files.pythonhosted.org/packages/ad/14/2ad38fd4037daab9e023456a4a40ed0154e9971f8d6aed41bdea390aabd9/ruff-0.12.10-py3-none-win_amd64.whl", hash = "sha256:7837eca8787f076f67aba2ca559cefd9c5cbc3a9852fd66186f4201b87c1563e", size = 13004616, upload-time = "2025-08-21T18:23:17.422Z" }, + { url = 
"https://files.pythonhosted.org/packages/24/3c/21cf283d67af33a8e6ed242396863af195a8a6134ec581524fd22b9811b6/ruff-0.12.10-py3-none-win_arm64.whl", hash = "sha256:cc138cc06ed9d4bfa9d667a65af7172b47840e1a98b02ce7011c391e54635ffc", size = 12074225, upload-time = "2025-08-21T18:23:20.137Z" }, +] + [[package]] name = "s3transfer" version = "0.13.1"