chore: remove message.text property (#1253)

This commit is contained in:
cthomas
2025-03-12 10:58:31 -07:00
committed by GitHub
parent a86c268926
commit eddd167f43
16 changed files with 127 additions and 109 deletions

View File

@@ -38,7 +38,7 @@ from letta.orm.enums import ToolType
from letta.schemas.agent import AgentState, AgentStepResponse, UpdateAgent
from letta.schemas.block import BlockUpdate
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import MessageRole
from letta.schemas.enums import MessageContentType, MessageRole
from letta.schemas.memory import ContextWindowOverview, Memory
from letta.schemas.message import Message, ToolReturn
from letta.schemas.openai.chat_completion_response import ChatCompletionResponse
@@ -154,13 +154,14 @@ class Agent(BaseAgent):
in_context_messages = self.agent_manager.get_in_context_messages(agent_id=self.agent_state.id, actor=self.user)
for i in range(len(in_context_messages) - 1, -1, -1):
msg = in_context_messages[i]
if msg.role == MessageRole.tool and msg.text:
if msg.role == MessageRole.tool and msg.content and len(msg.content) == 1 and msg.content[0].type == MessageContentType.text:
text_content = msg.content[0].text
try:
response_json = json.loads(msg.text)
response_json = json.loads(text_content)
if response_json.get("message"):
return response_json["message"]
except (json.JSONDecodeError, KeyError):
raise ValueError(f"Invalid JSON format in message: {msg.text}")
raise ValueError(f"Invalid JSON format in message: {text_content}")
return None
def update_memory_if_changed(self, new_memory: Memory) -> bool:
@@ -1010,7 +1011,7 @@ class Agent(BaseAgent):
err_msg,
details={
"num_in_context_messages": len(self.agent_state.message_ids),
"in_context_messages_text": [m.text for m in in_context_messages],
"in_context_messages_text": [m.content for m in in_context_messages],
"token_counts": token_counts,
},
)
@@ -1164,14 +1165,17 @@ class Agent(BaseAgent):
if (
len(in_context_messages) > 1
and in_context_messages[1].role == MessageRole.user
and isinstance(in_context_messages[1].text, str)
and in_context_messages[1].content
and len(in_context_messages[1].content) == 1
and in_context_messages[1].content[0].type == MessageContentType.text
# TODO remove hardcoding
and "The following is a summary of the previous " in in_context_messages[1].text
and "The following is a summary of the previous " in in_context_messages[1].content[0].text
):
# Summary message exists
assert in_context_messages[1].text is not None
summary_memory = in_context_messages[1].text
num_tokens_summary_memory = count_tokens(in_context_messages[1].text)
text_content = in_context_messages[1].content[0].text
assert text_content is not None
summary_memory = text_content
num_tokens_summary_memory = count_tokens(text_content)
# with a summary message, the real messages start at index 2
num_tokens_messages = (
num_tokens_from_messages(messages=in_context_messages_openai[2:], model=self.model)

View File

@@ -237,7 +237,8 @@ class LowLatencyAgent(BaseAgent):
# TODO: This is a pretty brittle pattern established all over our code, need to get rid of this
curr_system_message = in_context_messages[0]
curr_memory_str = agent_state.memory.compile()
if curr_memory_str in curr_system_message.text:
curr_system_message_text = curr_system_message.content[0].text
if curr_memory_str in curr_system_message_text:
# NOTE: could this cause issues if a block is removed? (substring match would still work)
logger.debug(
f"Memory hasn't changed for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name}), skipping system prompt rebuild"
@@ -251,7 +252,7 @@ class LowLatencyAgent(BaseAgent):
in_context_memory_last_edit=memory_edit_timestamp,
)
diff = united_diff(curr_system_message.text, new_system_message_str)
diff = united_diff(curr_system_message_text, new_system_message_str)
if len(diff) > 0:
logger.info(f"Rebuilding system with new memory...\nDiff:\n{diff}")

View File

@@ -56,7 +56,7 @@ def conversation_search(self: "Agent", query: str, page: Optional[int] = 0) -> O
results_str = f"No results found."
else:
results_pref = f"Showing {len(messages)} of {total} results (page {page}/{num_pages}):"
results_formatted = [message.text for message in messages]
results_formatted = [message.content[0].text for message in messages]
results_str = f"{results_pref} {json_dumps(results_formatted)}"
return results_str

View File

@@ -321,7 +321,7 @@ def cohere_chat_completions_request(
# See: https://docs.cohere.com/reference/chat
# The chat_history parameter should not be used for SYSTEM messages in most cases. Instead, to add a SYSTEM role message at the beginning of a conversation, the preamble parameter should be used.
assert msg_objs[0].role == "system", msg_objs[0]
preamble = msg_objs[0].text
preamble = msg_objs[0].content[0].text
# data["messages"] = [m.to_cohere_dict() for m in msg_objs[1:]]
data["messages"] = []

View File

@@ -596,7 +596,6 @@ def create(
messages[0].content[
0
].text += f'Select best function to call simply by responding with a single json block with the keys "function" and "params". Use double quotes around the arguments.'
return get_chat_completion(
model=llm_config.model,
messages=messages,

View File

@@ -36,7 +36,7 @@ def get_memory_functions(cls: Memory) -> Dict[str, Callable]:
def _format_summary_history(message_history: List[Message]):
# TODO use existing prompt formatters for this (eg ChatML)
return "\n".join([f"{m.role}: {m.text}" for m in message_history])
return "\n".join([f"{m.role}: {m.content[0].text}" for m in message_history])
def summarize_messages(

View File

@@ -158,19 +158,6 @@ class Message(BaseMessage):
del data["content"]
return data
@property
def text(self) -> Optional[str]:
"""
Retrieve the first text content's text.
Returns:
str: The text content, or None if no text content exists
"""
if not self.content:
return None
text_content = [content.text for content in self.content if content.type == MessageContentType.text]
return text_content[0] if text_content else None
def to_json(self):
json_message = vars(self)
if json_message["tool_calls"] is not None:
@@ -227,17 +214,21 @@ class Message(BaseMessage):
assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
) -> List[LettaMessage]:
"""Convert message object (in DB format) to the style used by the original Letta API"""
if self.content and len(self.content) == 1 and self.content[0].type == MessageContentType.text:
text_content = self.content[0].text
else:
text_content = None
messages = []
if self.role == MessageRole.assistant:
if self.text is not None:
if text_content is not None:
# This is type InnerThoughts
messages.append(
ReasoningMessage(
id=self.id,
date=self.created_at,
reasoning=self.text,
reasoning=text_content,
)
)
if self.tool_calls is not None:
@@ -281,9 +272,9 @@ class Message(BaseMessage):
# "message": response_string,
# "time": formatted_time,
# }
assert self.text is not None, self
assert text_content is not None, self
try:
function_return = json.loads(self.text)
function_return = json.loads(text_content)
status = function_return["status"]
if status == "OK":
status_enum = "success"
@@ -292,7 +283,7 @@ class Message(BaseMessage):
else:
raise ValueError(f"Invalid status: {status}")
except json.JSONDecodeError:
raise ValueError(f"Failed to decode function return: {self.text}")
raise ValueError(f"Failed to decode function return: {text_content}")
assert self.tool_call_id is not None
messages.append(
# TODO make sure this is what the API returns
@@ -300,7 +291,7 @@ class Message(BaseMessage):
ToolReturnMessage(
id=self.id,
date=self.created_at,
tool_return=self.text,
tool_return=text_content,
status=self.tool_returns[0].status if self.tool_returns else status_enum,
tool_call_id=self.tool_call_id,
stdout=self.tool_returns[0].stdout if self.tool_returns else None,
@@ -309,23 +300,23 @@ class Message(BaseMessage):
)
elif self.role == MessageRole.user:
# This is type UserMessage
assert self.text is not None, self
message_str = unpack_message(self.text)
assert text_content is not None, self
message_str = unpack_message(text_content)
messages.append(
UserMessage(
id=self.id,
date=self.created_at,
content=message_str or self.text,
content=message_str or text_content,
)
)
elif self.role == MessageRole.system:
# This is type SystemMessage
assert self.text is not None, self
assert text_content is not None, self
messages.append(
SystemMessage(
id=self.id,
date=self.created_at,
content=self.text,
content=text_content,
)
)
else:
@@ -494,11 +485,15 @@ class Message(BaseMessage):
"""Go from Message class to ChatCompletion message object"""
# TODO change to pydantic casting, eg `return SystemMessageModel(self)`
if self.content and len(self.content) == 1 and self.content[0].type == MessageContentType.text:
text_content = self.content[0].text
else:
text_content = None
if self.role == "system":
assert all([v is not None for v in [self.role]]), vars(self)
openai_message = {
"content": self.text,
"content": text_content,
"role": self.role,
}
# Optional field, do not include if null
@@ -506,9 +501,9 @@ class Message(BaseMessage):
openai_message["name"] = self.name
elif self.role == "user":
assert all([v is not None for v in [self.text, self.role]]), vars(self)
assert all([v is not None for v in [text_content, self.role]]), vars(self)
openai_message = {
"content": self.text,
"content": text_content,
"role": self.role,
}
# Optional field, do not include if null
@@ -516,9 +511,9 @@ class Message(BaseMessage):
openai_message["name"] = self.name
elif self.role == "assistant":
assert self.tool_calls is not None or self.text is not None
assert self.tool_calls is not None or text_content is not None
openai_message = {
"content": None if put_inner_thoughts_in_kwargs else self.text,
"content": None if put_inner_thoughts_in_kwargs else text_content,
"role": self.role,
}
# Optional fields, do not include if null
@@ -530,7 +525,7 @@ class Message(BaseMessage):
openai_message["tool_calls"] = [
add_inner_thoughts_to_tool_call(
tool_call,
inner_thoughts=self.text,
inner_thoughts=text_content,
inner_thoughts_key=INNER_THOUGHTS_KWARG,
).model_dump()
for tool_call in self.tool_calls
@@ -544,7 +539,7 @@ class Message(BaseMessage):
elif self.role == "tool":
assert all([v is not None for v in [self.role, self.tool_call_id]]), vars(self)
openai_message = {
"content": self.text,
"content": text_content,
"role": self.role,
"tool_call_id": self.tool_call_id[:max_tool_id_length] if max_tool_id_length else self.tool_call_id,
}
@@ -565,6 +560,10 @@ class Message(BaseMessage):
Args:
inner_thoughts_xml_tag (str): The XML tag to wrap around inner thoughts
"""
if self.content and len(self.content) == 1 and self.content[0].type == MessageContentType.text:
text_content = self.content[0].text
else:
text_content = None
def add_xml_tag(string: str, xml_tag: Optional[str]):
# NOTE: Anthropic docs recommends using <thinking> tag when using CoT + tool use
@@ -573,34 +572,34 @@ class Message(BaseMessage):
if self.role == "system":
# NOTE: this is not for system instructions, but instead system "events"
assert all([v is not None for v in [self.text, self.role]]), vars(self)
assert all([v is not None for v in [text_content, self.role]]), vars(self)
# Two options here, we would use system.package_system_message,
# or use a more Anthropic-specific packaging ie xml tags
user_system_event = add_xml_tag(string=f"SYSTEM ALERT: {self.text}", xml_tag="event")
user_system_event = add_xml_tag(string=f"SYSTEM ALERT: {text_content}", xml_tag="event")
anthropic_message = {
"content": user_system_event,
"role": "user",
}
elif self.role == "user":
assert all([v is not None for v in [self.text, self.role]]), vars(self)
assert all([v is not None for v in [text_content, self.role]]), vars(self)
anthropic_message = {
"content": self.text,
"content": text_content,
"role": self.role,
}
elif self.role == "assistant":
assert self.tool_calls is not None or self.text is not None
assert self.tool_calls is not None or text_content is not None
anthropic_message = {
"role": self.role,
}
content = []
# COT / reasoning / thinking
if self.text is not None and not put_inner_thoughts_in_kwargs:
if text_content is not None and not put_inner_thoughts_in_kwargs:
content.append(
{
"type": "text",
"text": add_xml_tag(string=self.text, xml_tag=inner_thoughts_xml_tag),
"text": add_xml_tag(string=text_content, xml_tag=inner_thoughts_xml_tag),
}
)
# Tool calling
@@ -610,7 +609,7 @@ class Message(BaseMessage):
if put_inner_thoughts_in_kwargs:
tool_call_input = add_inner_thoughts_to_tool_call(
tool_call,
inner_thoughts=self.text,
inner_thoughts=text_content,
inner_thoughts_key=INNER_THOUGHTS_KWARG,
).model_dump()
else:
@@ -639,7 +638,7 @@ class Message(BaseMessage):
{
"type": "tool_result",
"tool_use_id": self.tool_call_id,
"content": self.text,
"content": text_content,
}
],
}
@@ -656,6 +655,10 @@ class Message(BaseMessage):
# type Content: https://ai.google.dev/api/rest/v1/Content / https://ai.google.dev/api/rest/v1beta/Content
# parts[]: Part
# role: str ('user' or 'model')
if self.content and len(self.content) == 1 and self.content[0].type == MessageContentType.text:
text_content = self.content[0].text
else:
text_content = None
if self.role != "tool" and self.name is not None:
warnings.warn(f"Using Google AI with non-null 'name' field ({self.name}) not yet supported.")
@@ -665,18 +668,18 @@ class Message(BaseMessage):
# https://www.reddit.com/r/Bard/comments/1b90i8o/does_gemini_have_a_system_prompt_option_while/
google_ai_message = {
"role": "user", # NOTE: no 'system'
"parts": [{"text": self.text}],
"parts": [{"text": text_content}],
}
elif self.role == "user":
assert all([v is not None for v in [self.text, self.role]]), vars(self)
assert all([v is not None for v in [text_content, self.role]]), vars(self)
google_ai_message = {
"role": "user",
"parts": [{"text": self.text}],
"parts": [{"text": text_content}],
}
elif self.role == "assistant":
assert self.tool_calls is not None or self.text is not None
assert self.tool_calls is not None or text_content is not None
google_ai_message = {
"role": "model", # NOTE: different
}
@@ -684,10 +687,10 @@ class Message(BaseMessage):
# NOTE: Google AI API doesn't allow non-null content + function call
# To get around this, just do a two-part message, inner thoughts first then
parts = []
if not put_inner_thoughts_in_kwargs and self.text is not None:
if not put_inner_thoughts_in_kwargs and text_content is not None:
# NOTE: ideally we do multi-part for CoT / inner thoughts + function call, but Google AI API doesn't allow it
raise NotImplementedError
parts.append({"text": self.text})
parts.append({"text": text_content})
if self.tool_calls is not None:
# NOTE: implied support for multiple calls
@@ -701,10 +704,10 @@ class Message(BaseMessage):
raise UserWarning(f"Failed to parse JSON function args: {function_args}")
function_args = {"args": function_args}
if put_inner_thoughts_in_kwargs and self.text is not None:
if put_inner_thoughts_in_kwargs and text_content is not None:
assert "inner_thoughts" not in function_args, function_args
assert len(self.tool_calls) == 1
function_args[INNER_THOUGHTS_KWARG] = self.text
function_args[INNER_THOUGHTS_KWARG] = text_content
parts.append(
{
@@ -715,8 +718,8 @@ class Message(BaseMessage):
}
)
else:
assert self.text is not None
parts.append({"text": self.text})
assert text_content is not None
parts.append({"text": text_content})
google_ai_message["parts"] = parts
elif self.role == "tool":
@@ -731,9 +734,9 @@ class Message(BaseMessage):
# NOTE: Google AI API wants the function response as JSON only, no string
try:
function_response = json.loads(self.text)
function_response = json.loads(text_content)
except:
function_response = {"function_response": self.text}
function_response = {"function_response": text_content}
google_ai_message = {
"role": "function",
@@ -778,7 +781,10 @@ class Message(BaseMessage):
# TODO: update this prompt style once guidance from Cohere on
# embedded function calls in multi-turn conversation become more clear
if self.content and len(self.content) == 1 and self.content[0].type == MessageContentType.text:
text_content = self.content[0].text
else:
text_content = None
if self.role == "system":
"""
The chat_history parameter should not be used for SYSTEM messages in most cases.
@@ -787,26 +793,26 @@ class Message(BaseMessage):
raise UserWarning(f"role 'system' messages should go in 'preamble' field for Cohere API")
elif self.role == "user":
assert all([v is not None for v in [self.text, self.role]]), vars(self)
assert all([v is not None for v in [text_content, self.role]]), vars(self)
cohere_message = [
{
"role": "USER",
"message": self.text,
"message": text_content,
}
]
elif self.role == "assistant":
# NOTE: we may break this into two message - an inner thought and a function call
# Optionally, we could just make this a function call with the inner thought inside
assert self.tool_calls is not None or self.text is not None
assert self.tool_calls is not None or text_content is not None
if self.text and self.tool_calls:
if text_content and self.tool_calls:
if inner_thoughts_as_kwarg:
raise NotImplementedError
cohere_message = [
{
"role": "CHATBOT",
"message": self.text,
"message": text_content,
},
]
for tc in self.tool_calls:
@@ -820,7 +826,7 @@ class Message(BaseMessage):
"message": f"{function_call_prefix} {function_call_text}",
}
)
elif not self.text and self.tool_calls:
elif not text_content and self.tool_calls:
cohere_message = []
for tc in self.tool_calls:
# TODO better way to pack?
@@ -831,11 +837,11 @@ class Message(BaseMessage):
"message": f"{function_call_prefix} {function_call_text}",
}
)
elif self.text and not self.tool_calls:
elif text_content and not self.tool_calls:
cohere_message = [
{
"role": "CHATBOT",
"message": self.text,
"message": text_content,
}
]
else:
@@ -843,7 +849,7 @@ class Message(BaseMessage):
elif self.role == "tool":
assert all([v is not None for v in [self.role, self.tool_call_id]]), vars(self)
function_response_text = self.text
function_response_text = text_content
cohere_message = [
{
"role": function_response_role,

View File

@@ -601,11 +601,12 @@ class SyncServer(Server):
if isinstance(message, Message):
# Can't have a null text field
if message.text is None or len(message.text) == 0:
raise ValueError(f"Invalid input: '{message.text}'")
message_text = message.content[0].text
if message_text is None or len(message_text) == 0:
raise ValueError(f"Invalid input: '{message_text}'")
# If the input begins with a command prefix, reject
elif message.text.startswith("/"):
raise ValueError(f"Invalid input: '{message.text}'")
elif message_text.startswith("/"):
raise ValueError(f"Invalid input: '{message_text}'")
else:
raise TypeError(f"Invalid input: '{message}' - type {type(message)}")

View File

@@ -96,7 +96,7 @@ class Summarizer:
)
messages = await self.summarizer_agent.step(UserMessage(content=summary_request_text))
current_summary = "\n".join([m.text for m in messages])
current_summary = "\n".join([m.content[0].text for m in messages])
current_summary = f"{self.summary_prefix}{current_summary}"
return updated_in_context_messages, current_summary, True

View File

@@ -112,11 +112,11 @@ def test_send_message_to_agent(client, agent_obj, other_agent_obj):
target_snippet = f"{other_agent_obj.agent_state.id} said:"
for m in in_context_messages:
if target_snippet in m.text:
if target_snippet in m.content[0].text:
found = True
break
print(f"In context messages of the sender agent (without system):\n\n{"\n".join([m.text for m in in_context_messages[1:]])}")
print(f"In context messages of the sender agent (without system):\n\n{"\n".join([m.content[0].text for m in in_context_messages[1:]])}")
if not found:
raise Exception(f"Was not able to find an instance of the target snippet: {target_snippet}")

View File

@@ -192,8 +192,8 @@ def test_auto_summarize(client, mock_e2b_api_key_none):
def summarize_message_exists(messages: List[Message]) -> bool:
for message in messages:
if message.text and "The following is a summary of the previous" in message.text:
print(f"Summarize message found after {message_count} messages: \n {message.text}")
if message.content[0].text and "The following is a summary of the previous" in message.content[0].text:
print(f"Summarize message found after {message_count} messages: \n {message.content[0].text}")
return True
return False
@@ -277,4 +277,4 @@ def test_summarizer(config_filename, client, agent_state):
# Invoke a summarize
letta_agent.summarize_messages_inplace()
in_context_messages = client.get_in_context_messages(agent_state.id)
assert SUMMARY_KEY_PHRASE in in_context_messages[1].text, f"Test failed for config: {config_filename}"
assert SUMMARY_KEY_PHRASE in in_context_messages[1].content[0].text, f"Test failed for config: {config_filename}"

View File

@@ -628,7 +628,7 @@ def test_initial_message_sequence(client: Union[LocalClient, RESTClient], agent:
), f"Expected {len(custom_sequence) + 1} messages, got {len(custom_agent_state.message_ids)}"
# assert custom_agent_state.message_ids[1:] == [msg.id for msg in custom_sequence]
# should be contained in second message (after system message)
assert custom_sequence[0].content in client.get_in_context_messages(custom_agent_state.id)[1].text
assert custom_sequence[0].content in client.get_in_context_messages(custom_agent_state.id)[1].content[0].text
def test_add_and_manage_tags_for_agent(client: Union[LocalClient, RESTClient], agent: AgentState):

View File

@@ -262,7 +262,7 @@ def test_recall_memory(client: LocalClient, agent: AgentState):
in_context_messages = client.get_in_context_messages(agent.id)
exists = False
for m in in_context_messages:
if message_str in m.text:
if message_str in m.content[0].text:
exists = True
assert exists

View File

@@ -560,11 +560,11 @@ def test_create_agent_passed_in_initial_messages(server: SyncServer, default_use
assert server.message_manager.size(agent_id=agent_state.id, actor=default_user) == 2
init_messages = server.agent_manager.get_in_context_messages(agent_id=agent_state.id, actor=default_user)
# Check that the system appears in the first initial message
assert create_agent_request.system in init_messages[0].text
assert create_agent_request.memory_blocks[0].value in init_messages[0].text
assert create_agent_request.system in init_messages[0].content[0].text
assert create_agent_request.memory_blocks[0].value in init_messages[0].content[0].text
# Check that the second message is the passed in initial message seq
assert create_agent_request.initial_message_sequence[0].role == init_messages[1].role
assert create_agent_request.initial_message_sequence[0].content in init_messages[1].text
assert create_agent_request.initial_message_sequence[0].content in init_messages[1].content[0].text
def test_create_agent_default_initial_message(server: SyncServer, default_user, default_block):
@@ -585,8 +585,8 @@ def test_create_agent_default_initial_message(server: SyncServer, default_user,
assert server.message_manager.size(agent_id=agent_state.id, actor=default_user) == 4
init_messages = server.agent_manager.get_in_context_messages(agent_id=agent_state.id, actor=default_user)
# Check that the system appears in the first initial message
assert create_agent_request.system in init_messages[0].text
assert create_agent_request.memory_blocks[0].value in init_messages[0].text
assert create_agent_request.system in init_messages[0].content[0].text
assert create_agent_request.memory_blocks[0].value in init_messages[0].content[0].text
def test_update_agent(server: SyncServer, comprehensive_test_agent_fixture, other_tool, other_source, other_block, default_user):
@@ -1915,7 +1915,7 @@ def test_upsert_base_tools(server: SyncServer, default_user):
def test_message_create(server: SyncServer, hello_world_message_fixture, default_user):
"""Test creating a message using hello_world_message_fixture fixture"""
assert hello_world_message_fixture.id is not None
assert hello_world_message_fixture.text == "Hello, world!"
assert hello_world_message_fixture.content[0].text == "Hello, world!"
assert hello_world_message_fixture.role == "user"
# Verify we can retrieve it
@@ -1925,7 +1925,7 @@ def test_message_create(server: SyncServer, hello_world_message_fixture, default
)
assert retrieved is not None
assert retrieved.id == hello_world_message_fixture.id
assert retrieved.text == hello_world_message_fixture.text
assert retrieved.content[0].text == hello_world_message_fixture.content[0].text
assert retrieved.role == hello_world_message_fixture.role
@@ -1934,7 +1934,7 @@ def test_message_get_by_id(server: SyncServer, hello_world_message_fixture, defa
retrieved = server.message_manager.get_message_by_id(hello_world_message_fixture.id, actor=default_user)
assert retrieved is not None
assert retrieved.id == hello_world_message_fixture.id
assert retrieved.text == hello_world_message_fixture.text
assert retrieved.content[0].text == hello_world_message_fixture.content[0].text
def test_message_update(server: SyncServer, hello_world_message_fixture, default_user, other_user):
@@ -1942,9 +1942,9 @@ def test_message_update(server: SyncServer, hello_world_message_fixture, default
new_text = "Updated text"
updated = server.message_manager.update_message_by_id(hello_world_message_fixture.id, MessageUpdate(content=new_text), actor=other_user)
assert updated is not None
assert updated.text == new_text
assert updated.content[0].text == new_text
retrieved = server.message_manager.get_message_by_id(hello_world_message_fixture.id, actor=default_user)
assert retrieved.text == new_text
assert retrieved.content[0].text == new_text
# Assert that orm metadata fields are populated
assert retrieved.created_by_id == default_user.id
@@ -2073,7 +2073,7 @@ def test_message_listing_text_search(server: SyncServer, hello_world_message_fix
agent_id=sarah_agent.id, actor=default_user, query_text="Test message", limit=10
)
assert len(search_results) == 4
assert all("Test message" in msg.text for msg in search_results)
assert all("Test message" in msg.content[0].text for msg in search_results)
# Test no results
search_results = server.message_manager.list_user_messages_for_agent(
@@ -3056,7 +3056,7 @@ def test_job_messages_add(server: SyncServer, default_run, hello_world_message_f
)
assert len(messages) == 1
assert messages[0].id == hello_world_message_fixture.id
assert messages[0].text == hello_world_message_fixture.text
assert messages[0].content[0].text == hello_world_message_fixture.content[0].text
def test_job_messages_pagination(server: SyncServer, default_run, default_user, sarah_agent):

View File

@@ -662,7 +662,9 @@ def _test_get_messages_letta_format(
print(f"Error: letta_message_index out of range. Expected more letta_messages for message {i}: {message.role}")
raise ValueError(f"Mismatch in letta_messages length. Index: {letta_message_index}, Length: {len(letta_messages)}")
print(f"Processing message {i}: {message.role}, {message.text[:50] if message.text else 'null'}")
print(
f"Processing message {i}: {message.role}, {message.content[0].text[:50] if message.content and len(message.content) == 1 else 'null'}"
)
while letta_message_index < len(letta_messages):
letta_message = letta_messages[letta_message_index]
@@ -684,14 +686,14 @@ def _test_get_messages_letta_format(
break
letta_message = letta_messages[letta_message_index]
if message.text:
if message.content[0].text:
assert isinstance(letta_message, ReasoningMessage)
letta_message_index += 1
else:
assert message.tool_calls is not None
else: # Non-reverse handling
if message.text:
if message.content[0].text:
assert isinstance(letta_message, ReasoningMessage)
letta_message_index += 1
if letta_message_index >= len(letta_messages):
@@ -714,17 +716,17 @@ def _test_get_messages_letta_format(
elif message.role == MessageRole.user:
assert isinstance(letta_message, UserMessage)
assert unpack_message(message.text) == letta_message.content
assert unpack_message(message.content[0].text) == letta_message.content
letta_message_index += 1
elif message.role == MessageRole.system:
assert isinstance(letta_message, SystemMessage)
assert message.text == letta_message.content
assert message.content[0].text == letta_message.content
letta_message_index += 1
elif message.role == MessageRole.tool:
assert isinstance(letta_message, ToolReturnMessage)
assert message.text == letta_message.tool_return
assert message.content[0].text == letta_message.tool_return
letta_message_index += 1
else:

View File

@@ -11,6 +11,7 @@ from letta.config import LettaConfig
from letta.data_sources.connectors import DataConnector
from letta.schemas.enums import MessageRole
from letta.schemas.file import FileMetadata
from letta.schemas.message import Message
from letta.settings import TestSettings
from .constants import TIMEOUT
@@ -165,8 +166,12 @@ def wait_for_incoming_message(
while time.time() < deadline:
messages = client.server.message_manager.list_messages_for_agent(agent_id=agent_id, actor=client.user)
# Check for the system message containing `substring`
if any(message.role == MessageRole.system and substring in (message.text or "") for message in messages):
def get_message_text(message: Message) -> str:
return message.content[0].text if message.content and len(message.content) == 1 else ""
if any(message.role == MessageRole.system and substring in get_message_text(message) for message in messages):
return True
time.sleep(sleep_interval)