from enum import Enum, auto
from typing import Dict, List, Literal, Optional

from pydantic import Field

from letta.schemas.enums import PrimitiveType, StepStatus
from letta.schemas.letta_base import LettaBase
from letta.schemas.letta_stop_reason import StopReasonType
from letta.schemas.message import Message


class StepBase(LettaBase):
    """Base schema for step records; supplies the step ID prefix used by LettaBase."""

    __id_prefix__ = PrimitiveType.STEP.value


class Step(StepBase):
    """A single agent execution step.

    Captures provenance (origin, organization, provider, run, agent), the model
    configuration used, token accounting, tracing/correlation identifiers,
    user feedback, and error/status information for one step of an agent run.
    """

    # -- identity & provenance ------------------------------------------------
    id: str = Field(..., description="The id of the step. Assigned by the database.")
    origin: Optional[str] = Field(None, description="The surface that this agent step was initiated from.")
    organization_id: Optional[str] = Field(None, description="The unique identifier of the organization associated with the step.")
    provider_id: Optional[str] = Field(None, description="The unique identifier of the provider that was configured for this step")
    run_id: Optional[str] = Field(
        None, description="The unique identifier of the run that this step belongs to. Only included for async calls."
    )
    agent_id: Optional[str] = Field(None, description="The ID of the agent that performed the step.")

    # -- model / provider configuration --------------------------------------
    provider_name: Optional[str] = Field(None, description="The name of the provider used for this step.")
    provider_category: Optional[str] = Field(None, description="The category of the provider used for this step.")
    model: Optional[str] = Field(None, description="The name of the model used for this step.")
    model_endpoint: Optional[str] = Field(None, description="The model endpoint url used for this step.")
    context_window_limit: Optional[int] = Field(None, description="The context window limit configured for this step.")

    # -- token accounting ------------------------------------------------------
    completion_tokens: Optional[int] = Field(None, description="The number of tokens generated by the agent during this step.")
    prompt_tokens: Optional[int] = Field(None, description="The number of tokens in the prompt during this step.")
    total_tokens: Optional[int] = Field(None, description="The total number of tokens processed by the agent during this step.")
    completion_tokens_details: Optional[Dict] = Field(None, description="Detailed completion token breakdown (e.g., reasoning_tokens).")
    prompt_tokens_details: Optional[Dict] = Field(
        None, description="Detailed prompt token breakdown (e.g., cached_tokens, cache_read_tokens, cache_creation_tokens)."
    )

    # -- outcome, tracing & correlation ---------------------------------------
    stop_reason: Optional[StopReasonType] = Field(None, description="The stop reason associated with the step.")
    # default_factory avoids sharing one mutable list across instances (pydantic-recommended pattern)
    tags: List[str] = Field(default_factory=list, description="Metadata tags.")
    tid: Optional[str] = Field(None, description="The unique identifier of the transaction that processed this step.")
    trace_id: Optional[str] = Field(None, description="The trace id of the agent step.")
    request_id: Optional[str] = Field(None, description="The API request log ID from cloud-api for correlating steps with API requests.")
    messages: List[Message] = Field(
        default_factory=list,
        description="The messages generated during this step. Deprecated: use `GET /v1/steps/{step_id}/messages` endpoint instead",
        deprecated=True,
    )
    feedback: Optional[Literal["positive", "negative"]] = Field(
        None, description="The feedback for this step. Must be either 'positive' or 'negative'."
    )
    project_id: Optional[str] = Field(None, description="The project that the agent that executed this step belongs to (cloud only).")

    # error tracking fields
    error_type: Optional[str] = Field(None, description="The type/class of the error that occurred")
    error_data: Optional[Dict] = Field(None, description="Error details including message, traceback, and additional context")
    status: Optional[StepStatus] = Field(StepStatus.PENDING, description="Step status: pending, success, or failed")


class StepProgression(int, Enum):
    """Ordered lifecycle checkpoints for a step.

    `auto()` assigns monotonically increasing values, so later checkpoints
    compare greater than earlier ones. Kept as an (int, Enum) mixin rather
    than IntEnum to preserve Enum-style str()/repr() output.
    """

    START = auto()
    STREAM_RECEIVED = auto()
    RESPONSE_RECEIVED = auto()
    STEP_LOGGED = auto()
    LOGGED_TRACE = auto()
    FINISHED = auto()