fix: Fix system init messages and Anthropic parsing [LET-5385] (#5339)

* Fix system init messages and Anthropic parsing

* Add backwards compat tool call id adding
This commit is contained in:
Matthew Zhou
2025-10-10 15:11:38 -07:00
committed by Caren Thomas
parent fe91987749
commit 1c285f5170
4 changed files with 46 additions and 12 deletions

View File

@@ -104,6 +104,7 @@ class Message(SqlalchemyBase, OrganizationMixin, AgentMixin):
and len(self.content) == 1
and isinstance(self.content[0], TextContent)
):
self.tool_call_id = self.tool_returns[0].tool_call_id
self.tool_returns[0].func_response = self.content[0].text
return model

View File

@@ -883,6 +883,12 @@ class Message(BaseMessage):
[TextContent(text=openai_message_dict["content"])] if openai_message_dict["content"] else []
)
# This is really hacky and this interface is poorly designed, we should auto derive tool_returns instead of passing it in
if not tool_returns:
tool_returns = []
if "tool_returns" in openai_message_dict:
tool_returns = [ToolReturn(**tr) for tr in openai_message_dict["tool_returns"]]
# TODO(caren) bad assumption here that "reasoning_content" always comes before "redacted_reasoning_content"
if "reasoning_content" in openai_message_dict and openai_message_dict["reasoning_content"]:
content.append(
@@ -1481,19 +1487,33 @@ class Message(BaseMessage):
elif self.role == "tool":
# NOTE: Anthropic uses role "user" for "tool" responses
assert self.tool_call_id is not None, vars(self)
anthropic_message = {
"role": "user", # NOTE: diff
"content": [
# TODO support error types etc
content = []
for tool_return in self.tool_returns:
content.append(
{
"type": "tool_result",
"tool_use_id": self.tool_call_id,
"content": text_content,
"tool_use_id": tool_return.tool_call_id,
"content": tool_return.func_response,
}
],
}
)
if content:
anthropic_message = {
"role": "user",
"content": content,
}
else:
# This is for legacy reasons
anthropic_message = {
"role": "user", # NOTE: diff
"content": [
# TODO support error types etc
{
"type": "tool_result",
"tool_use_id": self.tool_call_id,
"content": text_content,
}
],
}
else:
raise ValueError(self.role)

View File

@@ -38,7 +38,7 @@ from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import AgentType, MessageRole
from letta.schemas.letta_message_content import TextContent
from letta.schemas.memory import Memory
from letta.schemas.message import Message, MessageCreate
from letta.schemas.message import Message, MessageCreate, ToolReturn
from letta.schemas.tool_rule import ToolRule
from letta.schemas.user import User
from letta.settings import DatabaseChoice, settings
@@ -536,6 +536,13 @@ def package_initial_message_sequence(
agent_id=agent_id,
model=model,
tool_call_id=tool_call_id,
tool_returns=[
ToolReturn(
tool_call_id=tool_call_id,
status="success",
func_response=function_response,
)
],
)
)
else:

View File

@@ -42,11 +42,17 @@ def get_initial_boot_messages(version, timezone, tool_call_id):
},
# obligatory function return message
{
# "role": "function",
"role": "tool",
"name": "send_message", # NOTE: technically not up to spec, this is old functions style
"content": package_function_response(True, None, timezone),
"tool_call_id": tool_call_id,
"tool_returns": [
{
"tool_call_id": tool_call_id,
"status": "success",
"func_response": package_function_response(True, None, timezone),
}
],
},
]