Files
letta-server/letta/agents/base_agent_v2.py
github-actions[bot] 9471fab2cf fix: Add agent_id property to BaseAgentV2 for backward compatibility (#8806)
The LettaAgentV3 (and LettaAgentV2) agents inherit from BaseAgentV2,
which, unlike the original BaseAgent class, did not expose an agent_id
attribute. This caused `AttributeError: 'LettaAgentV3' object has no
attribute 'agent_id'` when code attempted to access self.agent_id.

This fix adds an agent_id property to BaseAgentV2 that returns
self.agent_state.id, maintaining backward compatibility with code
that expects the self.agent_id interface from the original BaseAgent.

Closes #8805

🤖 Generated with [Letta Code](https://letta.com)

Co-authored-by: letta-code <248085862+letta-code@users.noreply.github.com>
Co-authored-by: Letta <noreply@letta.com>
2026-02-24 10:52:06 -08:00

92 lines
3.6 KiB
Python

from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, AsyncGenerator
from letta.constants import DEFAULT_MAX_STEPS
from letta.log import get_logger
from letta.schemas.agent import AgentState
from letta.schemas.enums import MessageStreamStatus
from letta.schemas.letta_message import LegacyLettaMessage, LettaMessage, MessageType
from letta.schemas.letta_response import LettaResponse
from letta.schemas.message import MessageCreate
from letta.schemas.user import User
if TYPE_CHECKING:
from letta.schemas.letta_request import ClientToolSchema
class BaseAgentV2(ABC):
    """
    Abstract interface for the main execution loop of letta agents.

    Concrete subclasses implement message management, LLM API requests,
    tool execution, and context tracking behind the three entry points
    declared below: ``build_request``, ``step``, and ``stream``.
    """

    def __init__(self, agent_state: AgentState, actor: User):
        # Logger is namespaced by the agent's ID so log lines from
        # concurrent agents can be told apart.
        self.logger = get_logger(agent_state.id)
        self.agent_state = agent_state
        self.actor = actor

    @property
    def agent_id(self) -> str:
        """ID of the underlying agent state.

        Kept for backward compatibility with callers that expect the
        ``self.agent_id`` attribute from the original BaseAgent class.
        """
        return self.agent_state.id

    @abstractmethod
    async def build_request(
        self,
        input_messages: list[MessageCreate],
    ) -> dict:
        """
        Dry-run the agent loop, returning only the generated request
        payload that would be sent to the underlying LLM provider.
        """
        raise NotImplementedError

    @abstractmethod
    async def step(
        self,
        input_messages: list[MessageCreate],
        max_steps: int = DEFAULT_MAX_STEPS,
        run_id: str | None = None,
        use_assistant_message: bool = True,
        include_return_message_types: list[MessageType] | None = None,
        request_start_timestamp_ns: int | None = None,
        client_tools: list["ClientToolSchema"] | None = None,
        include_compaction_messages: bool = False,  # accepted for API compatibility; not used in V2
    ) -> LettaResponse:
        """
        Run the agent loop in blocking mode, returning every message at once.

        Args:
            client_tools: Optional client-side tool definitions. When one is
                invoked, execution pauses until the client supplies the
                tool returns.
            include_compaction_messages: Accepted for API compatibility but
                has no effect in V2.
        """
        raise NotImplementedError

    @abstractmethod
    async def stream(
        self,
        input_messages: list[MessageCreate],
        max_steps: int = DEFAULT_MAX_STEPS,
        stream_tokens: bool = False,
        run_id: str | None = None,
        use_assistant_message: bool = True,
        include_return_message_types: list[MessageType] | None = None,
        request_start_timestamp_ns: int | None = None,
        conversation_id: str | None = None,
        client_tools: list["ClientToolSchema"] | None = None,
        include_compaction_messages: bool = False,  # accepted for API compatibility; not used in V2
    ) -> AsyncGenerator[LettaMessage | LegacyLettaMessage | MessageStreamStatus, None]:
        """
        Run the agent loop in streaming mode, yielding chunks as they become
        available. With ``stream_tokens=True`` individual tokens are yielded
        as they arrive from the LLM for the lowest-latency experience;
        otherwise each complete step (reasoning + tool call + tool return)
        is yielded once it finishes.

        Args:
            client_tools: Optional client-side tool definitions. When one is
                invoked, execution pauses until the client supplies the
                tool returns.
            include_compaction_messages: Accepted for API compatibility but
                has no effect in V2.
        """
        raise NotImplementedError