test: add sleeptime agent tests (#1661)

This commit is contained in:
cthomas
2025-04-10 15:12:25 -07:00
committed by GitHub
parent c8b8055a43
commit 55b9fce827
3 changed files with 288 additions and 413 deletions

View File

@@ -1,222 +0,0 @@
import pytest
from letta import BasicBlockMemory
from letta.client.client import create_client
from letta.constants import DEFAULT_HUMAN, DEFAULT_PERSONA
from letta.prompts import gpt_system
from letta.schemas.agent import AgentType
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
from letta.schemas.memory import BasicBlockMemory, Block
from letta.schemas.tool_rule import TerminalToolRule
from letta.sleeptime_agent import finish_rethinking_memory, rethink_memory, trigger_rethink_memory
from letta.utils import get_human_text, get_persona_text
@pytest.fixture(scope="module")
def client():
    """Module-scoped Letta client with default LLM and embedding configs set."""
    letta_client = create_client()
    llm_config = LLMConfig.default_config("gpt-4o-mini")
    embedding_config = EmbeddingConfig.default_config(provider="openai")
    letta_client.set_default_llm_config(llm_config)
    letta_client.set_default_embedding_config(embedding_config)
    yield letta_client
@pytest.fixture(autouse=True)
def clear_agents(client):
    """Delete every existing agent before each test so tests start clean."""
    existing_agents = client.list_agents()
    for existing_agent in existing_agents:
        client.delete_agent(existing_agent.id)
def test_ripple_edit(client, disable_e2b_api_key):
    """Ripple-effect memory edit across two agents.

    A conversation agent is given a trigger_rethink_memory tool; sending it a
    "[trigger_rethink_memory]" message should cause the offline (sleeptime)
    memory agent to rewrite the shared ``rethink_memory_block``.  The test
    asserts that, afterwards, both agents see a non-"[empty]" value in that
    shared block.
    """
    # Register the custom trigger tool and look up the built-in send_message tool.
    trigger_rethink_memory_tool = client.create_or_update_tool(trigger_rethink_memory)
    send_message = client.server.tool_manager.get_tool_by_name(tool_name="send_message", actor=client.user)
    # Separate persona/human blocks for the conversation agent vs. the offline agent.
    conversation_human_block = client.create_block(label="human", value=get_human_text(DEFAULT_HUMAN), limit=2000)
    conversation_persona_block = client.create_block(label="persona", value=get_persona_text(DEFAULT_PERSONA), limit=2000)
    offline_human_block = client.create_block(label="human", value=get_human_text(DEFAULT_HUMAN), limit=2000)
    offline_persona_block = client.create_block(label="persona", value=get_persona_text("offline_memory_persona"), limit=2000)
    # Figure 1. from Evaluating the Ripple Effects of Knowledge Editing in Language Models (Cohen et al., 2023)
    # https://arxiv.org/pdf/2307.12976
    fact_block = client.create_block(
        label="fact_block",
        value="""Messi resides in the Paris.
        Messi plays in the league Ligue 1.
        Messi plays for the team Paris Saint-Germain.
        The national team Messi plays for is the Argentina team.
        Messi is also known as Leo Messi
        Victor Ulloa plays for Inter Miami""",
        limit=2000,
    )
    # Shared scratch block; attached to BOTH agents below via block_ids, so an
    # edit by the offline agent is visible to the conversation agent.
    new_memory = client.create_block(label="rethink_memory_block", value="[empty]", limit=2000)
    conversation_memory = BasicBlockMemory(blocks=[conversation_persona_block, conversation_human_block, fact_block])
    offline_memory = BasicBlockMemory(blocks=[offline_persona_block, offline_human_block, fact_block])
    conversation_agent = client.create_agent(
        name="conversation_agent",
        agent_type=AgentType.memgpt_agent,
        system=gpt_system.get_system_text("memgpt_convo_only"),
        llm_config=LLMConfig.default_config("gpt-4"),
        embedding_config=EmbeddingConfig.default_config("text-embedding-ada-002"),
        tool_ids=[send_message.id, trigger_rethink_memory_tool.id],
        memory=conversation_memory,
        block_ids=[new_memory.id],
        include_base_tools=False,
    )
    assert conversation_agent is not None
    assert set(conversation_agent.memory.list_block_labels()) == {"persona", "human", "fact_block", "rethink_memory_block"}
    # Tools the offline agent needs to rewrite memory and signal completion.
    rethink_memory_tool = client.create_or_update_tool(rethink_memory)
    finish_rethinking_memory_tool = client.create_or_update_tool(finish_rethinking_memory)
    offline_memory_agent = client.create_agent(
        name="offline_memory_agent",
        agent_type=AgentType.sleeptime_agent,
        system=gpt_system.get_system_text("memgpt_offline_memory"),
        memory=offline_memory,
        llm_config=LLMConfig.default_config("gpt-4"),
        embedding_config=EmbeddingConfig.default_config("text-embedding-ada-002"),
        tool_ids=[rethink_memory_tool.id, finish_rethinking_memory_tool.id],
        # finish_rethinking_memory terminates the offline agent's loop.
        tool_rules=[TerminalToolRule(tool_name=finish_rethinking_memory_tool.name)],
        block_ids=[new_memory.id],
        include_base_tools=False,
    )
    assert offline_memory_agent is not None
    assert set(offline_memory_agent.memory.list_block_labels()) == {"persona", "human", "fact_block", "rethink_memory_block"}
    # The bracketed prefix triggers the rethink flow on the conversation agent.
    response = client.user_message(
        agent_id=conversation_agent.id, message="[trigger_rethink_memory]: Messi has now moved to playing for Inter Miami"
    )
    # Re-fetch both agents: the shared block should have been rewritten.
    offline_memory_agent = client.get_agent(agent_id=offline_memory_agent.id)
    assert offline_memory_agent.memory.get_block("rethink_memory_block").value != "[empty]"
    conversation_agent = client.get_agent(agent_id=conversation_agent.id)
    assert conversation_agent.memory.get_block("rethink_memory_block").value != "[empty]"
    # Clean up agent
    client.delete_agent(conversation_agent.id)
    client.delete_agent(offline_memory_agent.id)
def test_chat_only_agent(client, disable_e2b_api_key):
    """Pair a chat-only conversation agent with an offline (sleeptime) memory
    agent that shares the conversation agent's human/persona blocks, then
    verify the offline agent edits the shared human block away from the
    default text.
    """
    # Function-local import keeps the tool callables scoped to this test.
    from letta.offline_memory_agent import finish_rethinking_memory, rethink_memory

    send_message = client.server.tool_manager.get_tool_by_name(tool_name="send_message", actor=client.user)
    # Rebind the imported callables to their registered Tool records.
    rethink_memory = client.create_or_update_tool(rethink_memory)
    finish_rethinking_memory = client.create_or_update_tool(finish_rethinking_memory)
    conversation_human_block = client.create_block(label="chat_only_agent_human", value=get_human_text(DEFAULT_HUMAN), limit=2000)
    conversation_persona_block = client.create_block(label="chat_only_agent_persona", value=get_persona_text(DEFAULT_PERSONA), limit=2000)
    offline_persona_block = Block(
        name="offline_memory_persona",
        label="offline_memory_persona",
        value=get_persona_text("offline_memory_persona"),
        limit=2000,
    )
    # The offline agent's memory includes the conversation agent's blocks, so
    # its edits are visible to the chat-only agent.
    offline_memory = BasicBlockMemory(
        blocks=[
            offline_persona_block,
            conversation_human_block,
            conversation_persona_block,
        ]
    )
    offline_memory_agent = client.create_agent(
        name="offline_memory_agent",
        agent_type=AgentType.sleeptime_agent,
        system=gpt_system.get_system_text("memgpt_memory_only"),
        memory=offline_memory,
        llm_config=LLMConfig.default_config("gpt-4"),
        embedding_config=EmbeddingConfig.default_config("text-embedding-ada-002"),
        tool_ids=[rethink_memory.id, finish_rethinking_memory.id],
        include_base_tools=False,
    )
    chat_only_agent = client.create_agent(
        name="conversation_agent",
        agent_type=AgentType.memgpt_agent,
        llm_config=LLMConfig.default_config("gpt-4"),
        system=gpt_system.get_system_text("memgpt_convo_only"),
        embedding_config=EmbeddingConfig.default_config("text-embedding-ada-002"),
        tool_ids=[send_message.id],
        memory=BasicBlockMemory(blocks=[conversation_persona_block, conversation_human_block]),
        include_base_tools=False,
    )
    assert chat_only_agent is not None
    assert offline_memory_agent is not None
    # NOTE: the system messages sent to the offline memory agent here are not the actual messages,
    # from the chat only agent. We need to actually stream the responses from the agent to the offline memory
    # these are just for testing
    for message in [
        ("hello", "user"),
        ("hi chad, how's it going?", "system"),
        ("my name is not chad, my name is swoodily", "user"),
        ("I'm sorry, I'm make a note of that, swoodily", "system"),
        ("what's the weather like today?", "user"),
        ("could you specify where you are at?", "system"),
        ("I'm in SF", "user"),
        ("it's currently 60 degrees in SF", "system"),
        ("actually, I'm in Palo Alto", "user"),
        ("it's currently 60 degrees in Palo Alto", "system"),
        ("that sounds nice, I might go for a hike today then", "user"),
        ("Here are some hikes near Palo Alto: 1. The Dish 2. Redtail Loop 3. Adobe Creek", "system"),
        ("I'm going to try the first one, the Dish", "user"),
        ("That sounds great, hope you enjoy it", "system"),
    ]:
        # User messages fan out to BOTH agents; simulated "system" replies go
        # only to the offline agent (standing in for streamed assistant turns).
        if message[1] == "user":
            client.send_message(agent_id=chat_only_agent.id, message=message[0], role=message[1])
            client.send_message(agent_id=offline_memory_agent.id, message=message[0], role=message[1])
        else:
            client.send_message(agent_id=offline_memory_agent.id, message=message[0], role=message[1])
    offline_memory_agent = client.get_agent(agent_id=offline_memory_agent.id)
    chat_only_agent = client.get_agent(agent_id=chat_only_agent.id)
    # The offline agent should have rewritten the shared human block.
    assert chat_only_agent.memory.get_block("chat_only_agent_human").value != get_human_text(DEFAULT_HUMAN)
    client.delete_agent(chat_only_agent.id)
    client.delete_agent(offline_memory_agent.id)
def test_initial_message_sequence(client, disable_e2b_api_key):
    """Verify that an explicitly empty initial_message_sequence suppresses the
    default canned opening messages, leaving only the system message."""
    agent = client.create_agent(
        name="offline_memory_agent",
        agent_type=AgentType.sleeptime_agent,
        system=gpt_system.get_system_text("memgpt_offline_memory"),
        llm_config=LLMConfig.default_config("gpt-4"),
        embedding_config=EmbeddingConfig.default_config("text-embedding-ada-002"),
        include_base_tools=False,
        initial_message_sequence=[],
    )
    assert agent is not None
    # Only the system message should be present — no default greeting sequence.
    assert len(agent.message_ids) == 1
    client.delete_agent(agent.id)

View File

@@ -0,0 +1,284 @@
import time
import pytest
from sqlalchemy import delete
from letta.config import LettaConfig
from letta.constants import DEFAULT_HUMAN
from letta.orm import Provider, Step
from letta.orm.enums import JobType
from letta.orm.errors import NoResultFound
from letta.schemas.agent import CreateAgent
from letta.schemas.block import CreateBlock
from letta.schemas.enums import JobStatus, ToolRuleType
from letta.schemas.group import GroupUpdate, ManagerType, SleeptimeManagerUpdate
from letta.schemas.message import MessageCreate
from letta.schemas.run import Run
from letta.server.server import SyncServer
from letta.utils import get_human_text, get_persona_text
@pytest.fixture(scope="module")
def server():
    """Load and persist the Letta config, then hand back a shared SyncServer."""
    letta_config = LettaConfig.load()
    print("CONFIG PATH", letta_config.config_path)
    letta_config.save()
    return SyncServer()
@pytest.fixture(scope="module")
def org_id(server):
    """Create the default organization; on teardown, purge steps/providers and
    delete the organization."""
    organization = server.organization_manager.create_default_organization()
    yield organization.id
    # cleanup
    with server.organization_manager.session_maker() as db_session:
        db_session.execute(delete(Step))
        db_session.execute(delete(Provider))
        db_session.commit()
    server.organization_manager.delete_organization_by_id(organization.id)
@pytest.fixture(scope="module")
def actor(server, org_id):
    """Default user acting as the caller for all manager operations."""
    default_user = server.user_manager.create_default_user()
    yield default_user
    # cleanup
    server.user_manager.delete_user_by_id(default_user.id)
@pytest.mark.asyncio
async def test_sleeptime_group_chat(server, actor):
    """Exercise the full sleeptime-agent lifecycle.

    Creating a main agent with ``enable_sleeptime`` spawns a paired sleeptime
    agent in a group; the two share the "human" memory block; every
    ``sleeptime_agent_frequency``-th user message enqueues a background run for
    the sleeptime agent; deleting the main agent cascades to the group and the
    sleeptime agent.
    """
    # 0. Refresh base tools
    server.tool_manager.upsert_base_tools(actor=actor)
    # 1. Create sleeptime agent
    main_agent = server.create_agent(
        request=CreateAgent(
            name="main_agent",
            memory_blocks=[
                CreateBlock(
                    label="persona",
                    value="You are a personal assistant that helps users with requests.",
                ),
                CreateBlock(
                    label="human",
                    value="My favorite plant is the fiddle leaf\nMy favorite color is lavender",
                ),
            ],
            # model="openai/gpt-4o-mini",
            model="anthropic/claude-3-5-sonnet-20240620",
            embedding="openai/text-embedding-ada-002",
            enable_sleeptime=True,
        ),
        actor=actor,
    )
    # `is True` instead of `== True` (PEP 8 / flake8 E712).
    assert main_agent.enable_sleeptime is True
    # The main agent delegates memory editing to the sleeptime agent, so it
    # must not carry the direct memory-editing tools itself.
    main_agent_tools = [tool.name for tool in main_agent.tools]
    assert "core_memory_append" not in main_agent_tools
    assert "core_memory_replace" not in main_agent_tools
    assert "archival_memory_insert" not in main_agent_tools
    # 2. Override frequency for test (the default cadence is too slow for CI)
    group = server.group_manager.modify_group(
        group_id=main_agent.multi_agent_group.id,
        group_update=GroupUpdate(
            manager_config=SleeptimeManagerUpdate(
                sleeptime_agent_frequency=2,
            ),
        ),
        actor=actor,
    )
    assert group.manager_type == ManagerType.sleeptime
    assert group.sleeptime_agent_frequency == 2
    assert len(group.agent_ids) == 1
    # 3. Verify shared blocks: the "human" block must be attached to both agents.
    sleeptime_agent_id = group.agent_ids[0]
    shared_block = server.agent_manager.get_block_with_label(agent_id=main_agent.id, block_label="human", actor=actor)
    agents = server.block_manager.get_agents_for_block(block_id=shared_block.id, actor=actor)
    assert len(agents) == 2
    assert sleeptime_agent_id in [agent.id for agent in agents]
    assert main_agent.id in [agent.id for agent in agents]
    # 4. Verify sleeptime agent tools and the parent_last_tool rule wiring.
    sleeptime_agent = server.agent_manager.get_agent_by_id(agent_id=sleeptime_agent_id, actor=actor)
    sleeptime_agent_tools = [tool.name for tool in sleeptime_agent.tools]
    assert "rethink_memory" in sleeptime_agent_tools
    assert "finish_rethinking_memory" in sleeptime_agent_tools
    assert "view_core_memory_with_line_numbers" in sleeptime_agent_tools
    assert "core_memory_insert" in sleeptime_agent_tools
    assert len([rule for rule in sleeptime_agent.tool_rules if rule.type == ToolRuleType.parent_last_tool]) > 0
    # 5. Send messages and verify run ids. With frequency=2 every second
    #    message (odd 0-based index i) should spawn exactly one background run,
    #    hence `i % 2` below.
    message_text = [
        "my favorite color is orange",
        "not particularly. today is a good day",
        "actually my favorite color is coral",
        "let's change the subject",
        "actually my fav plant is the the african spear",
        "indeed",
    ]
    run_ids = []
    for i, text in enumerate(message_text):
        response = await server.send_message_to_agent(
            agent_id=main_agent.id,
            actor=actor,
            messages=[
                MessageCreate(
                    role="user",
                    content=text,
                ),
            ],
            stream_steps=False,
            stream_tokens=False,
        )
        assert len(response.messages) > 0
        assert len(response.usage.run_ids or []) == i % 2
        run_ids.extend(response.usage.run_ids or [])
    # Every background run must be attributed to the sleeptime agent and complete.
    jobs = server.job_manager.list_jobs(actor=actor, job_type=JobType.RUN)
    runs = [Run.from_job(job) for job in jobs]
    agent_runs = [run for run in runs if "agent_id" in run.metadata and run.metadata["agent_id"] == sleeptime_agent_id]
    assert len(agent_runs) == len(run_ids)
    for run_id in run_ids:
        job = server.job_manager.get_job_by_id(job_id=run_id, actor=actor)
        assert job.status == JobStatus.completed
    # 6. Delete agent — group and sleeptime agent must be cascaded away.
    server.agent_manager.delete_agent(agent_id=main_agent.id, actor=actor)
    with pytest.raises(NoResultFound):
        server.group_manager.retrieve_group(group_id=group.id, actor=actor)
    with pytest.raises(NoResultFound):
        server.agent_manager.get_agent_by_id(agent_id=sleeptime_agent_id, actor=actor)
@pytest.mark.flaky
@pytest.mark.asyncio
async def test_sleeptime_removes_redundant_information(server, actor):
    """The sleeptime agent should deduplicate the human block.

    The block is seeded with the "fiddle leaf" fact three times; after a couple
    of messages (frequency=1, so the sleeptime agent runs every turn) the count
    of that phrase should have dropped.

    Marked flaky: it relies on a fixed sleep for the background run to finish.
    """
    # 1. set up sleep-time agent as in test_sleeptime_group_chat
    server.tool_manager.upsert_base_tools(actor=actor)
    main_agent = server.create_agent(
        request=CreateAgent(
            name="main_agent",
            memory_blocks=[
                CreateBlock(
                    label="persona",
                    value="You are a personal assistant that helps users with requests.",
                ),
                CreateBlock(
                    label="human",
                    # Deliberately redundant: "fiddle leaf" appears three times.
                    value="My favorite plant is the fiddle leaf\nMy favorite dog is the husky\nMy favorite plant is the fiddle leaf\nMy favorite plant is the fiddle leaf",
                ),
            ],
            model="anthropic/claude-3-5-sonnet-20240620",
            embedding="openai/text-embedding-ada-002",
            enable_sleeptime=True,
        ),
        actor=actor,
    )
    # Run the sleeptime agent after every message so dedup happens quickly.
    group = server.group_manager.modify_group(
        group_id=main_agent.multi_agent_group.id,
        group_update=GroupUpdate(
            manager_config=SleeptimeManagerUpdate(
                sleeptime_agent_frequency=1,
            ),
        ),
        actor=actor,
    )
    sleeptime_agent_id = group.agent_ids[0]
    shared_block = server.agent_manager.get_block_with_label(agent_id=main_agent.id, block_label="human", actor=actor)
    count_before_memory_edits = shared_block.value.count("fiddle leaf")
    test_messages = ["hello there", "my favorite bird is the sparrow"]
    for test_message in test_messages:
        _ = await server.send_message_to_agent(
            agent_id=main_agent.id,
            actor=actor,
            messages=[
                MessageCreate(
                    role="user",
                    content=test_message,
                ),
            ],
            stream_steps=False,
            stream_tokens=False,
        )
    # 2. Allow memory blocks time to update (background runs are async)
    time.sleep(5)
    # 3. Check that the memory blocks have been collapsed
    shared_block = server.agent_manager.get_block_with_label(agent_id=main_agent.id, block_label="human", actor=actor)
    count_after_memory_edits = shared_block.value.count("fiddle leaf")
    assert count_after_memory_edits < count_before_memory_edits
    # 4. Delete agent — group and sleeptime agent must be cascaded away.
    server.agent_manager.delete_agent(agent_id=main_agent.id, actor=actor)
    with pytest.raises(NoResultFound):
        server.group_manager.retrieve_group(group_id=group.id, actor=actor)
    with pytest.raises(NoResultFound):
        server.agent_manager.get_agent_by_id(agent_id=sleeptime_agent_id, actor=actor)
@pytest.mark.asyncio
async def test_sleeptime_edit(server, actor):
    """A standalone sleeptime agent rewrites its fact_block on new information.

    After being told Messi moved to Inter Miami, "Inter Miami" should appear in
    the fact_block more than once (the seed text already mentions it once, in
    the Victor Ulloa line).
    """
    sleeptime_agent = server.create_agent(
        request=CreateAgent(
            name="sleeptime_agent",
            agent_type="sleeptime_agent",
            memory_blocks=[
                # Fix: persona/human values were swapped — the persona block
                # carried the default human text and the human block carried
                # the offline-memory persona text (compare the sibling tests,
                # where persona gets persona text and human gets human text).
                CreateBlock(
                    label="persona",
                    value=get_persona_text("offline_memory_persona"),
                    limit=2000,
                ),
                CreateBlock(
                    label="human",
                    value=get_human_text(DEFAULT_HUMAN),
                    limit=2000,
                ),
                CreateBlock(
                    label="fact_block",
                    value="""Messi resides in the Paris.
                    Messi plays in the league Ligue 1.
                    Messi plays for the team Paris Saint-Germain.
                    The national team Messi plays for is the Argentina team.
                    Messi is also known as Leo Messi
                    Victor Ulloa plays for Inter Miami""",
                    limit=2000,
                ),
            ],
            model="anthropic/claude-3-5-sonnet-20240620",
            embedding="openai/text-embedding-ada-002",
            enable_sleeptime=True,
        ),
        actor=actor,
    )
    _ = await server.send_message_to_agent(
        agent_id=sleeptime_agent.id,
        actor=actor,
        messages=[
            MessageCreate(
                role="user",
                content="Messi has now moved to playing for Inter Miami",
            ),
        ],
        stream_steps=False,
        stream_tokens=False,
    )
    fact_block = server.agent_manager.get_block_with_label(agent_id=sleeptime_agent.id, block_label="fact_block", actor=actor)
    # Seed text contains exactly one "Inter Miami"; a count > 1 proves the
    # agent actually wrote the new fact into the block.
    assert fact_block.value.count("Inter Miami") > 1

View File

@@ -1,26 +1,20 @@
import time
import pytest
from sqlalchemy import delete
from letta.config import LettaConfig
from letta.orm import Provider, Step
from letta.orm.enums import JobType
from letta.orm.errors import NoResultFound
from letta.schemas.agent import CreateAgent
from letta.schemas.block import CreateBlock
from letta.schemas.enums import JobStatus, ToolRuleType
from letta.schemas.group import (
DynamicManager,
DynamicManagerUpdate,
GroupCreate,
GroupUpdate,
ManagerType,
RoundRobinManager,
SleeptimeManager,
RoundRobinManagerUpdate,
SupervisorManager,
)
from letta.schemas.message import MessageCreate
from letta.schemas.run import Run
from letta.server.server import SyncServer
@@ -189,7 +183,7 @@ async def test_modify_group_pattern(server, actor, participant_agents, manager_a
server.group_manager.modify_group(
group_id=group.id,
group_update=GroupUpdate(
manager_config=DynamicManager(
manager_config=DynamicManagerUpdate(
manager_type=ManagerType.dynamic,
manager_agent_id=manager_agent.id,
),
@@ -263,7 +257,7 @@ async def test_round_robin(server, actor, participant_agents):
group_id=group.id,
group_update=GroupUpdate(
agent_ids=[agent.id for agent in participant_agents][::-1],
manager_config=RoundRobinManager(
manager_config=RoundRobinManagerUpdate(
max_turns=max_turns,
),
),
@@ -440,184 +434,3 @@ async def test_dynamic_group_chat(server, actor, manager_agent, participant_agen
finally:
server.group_manager.delete_group(group_id=group.id, actor=actor)
@pytest.mark.asyncio
async def test_sleeptime_group_chat(server, actor):
    """Sleeptime group-chat lifecycle (legacy variant using ``SleeptimeManager``
    with an explicit ``manager_agent_id`` rather than ``SleeptimeManagerUpdate``).

    Creating a main agent with ``enable_sleeptime`` spawns a paired sleeptime
    agent in a group; the two share the "human" block; every second message
    enqueues a background run; deleting the main agent cascades to the group
    and the sleeptime agent.
    """
    # 0. Refresh base tools
    server.tool_manager.upsert_base_tools(actor=actor)
    # 1. Create sleeptime agent
    main_agent = server.create_agent(
        request=CreateAgent(
            name="main_agent",
            memory_blocks=[
                CreateBlock(
                    label="persona",
                    value="You are a personal assistant that helps users with requests.",
                ),
                CreateBlock(
                    label="human",
                    value="My favorite plant is the fiddle leaf\nMy favorite color is lavender",
                ),
            ],
            # model="openai/gpt-4o-mini",
            model="anthropic/claude-3-5-sonnet-20240620",
            embedding="openai/text-embedding-ada-002",
            enable_sleeptime=True,
        ),
        actor=actor,
    )
    assert main_agent.enable_sleeptime == True
    # The main agent delegates memory editing, so it must not carry the
    # direct memory-editing tools itself.
    main_agent_tools = [tool.name for tool in main_agent.tools]
    assert "core_memory_append" not in main_agent_tools
    assert "core_memory_replace" not in main_agent_tools
    assert "archival_memory_insert" not in main_agent_tools
    # 2. Override frequency for test
    group = server.group_manager.modify_group(
        group_id=main_agent.multi_agent_group.id,
        group_update=GroupUpdate(
            manager_config=SleeptimeManager(
                manager_agent_id=main_agent.id,
                sleeptime_agent_frequency=2,
            ),
        ),
        actor=actor,
    )
    assert group.manager_type == ManagerType.sleeptime
    assert group.sleeptime_agent_frequency == 2
    assert len(group.agent_ids) == 1
    # 3. Verify shared blocks: the "human" block must be attached to both agents.
    sleeptime_agent_id = group.agent_ids[0]
    shared_block = server.agent_manager.get_block_with_label(agent_id=main_agent.id, block_label="human", actor=actor)
    agents = server.block_manager.get_agents_for_block(block_id=shared_block.id, actor=actor)
    assert len(agents) == 2
    assert sleeptime_agent_id in [agent.id for agent in agents]
    assert main_agent.id in [agent.id for agent in agents]
    # 4. Verify sleeptime agent tools and the parent_last_tool rule wiring.
    sleeptime_agent = server.agent_manager.get_agent_by_id(agent_id=sleeptime_agent_id, actor=actor)
    sleeptime_agent_tools = [tool.name for tool in sleeptime_agent.tools]
    assert "rethink_memory" in sleeptime_agent_tools
    assert "finish_rethinking_memory" in sleeptime_agent_tools
    assert "view_core_memory_with_line_numbers" in sleeptime_agent_tools
    assert "core_memory_insert" in sleeptime_agent_tools
    assert len([rule for rule in sleeptime_agent.tool_rules if rule.type == ToolRuleType.parent_last_tool]) > 0
    # 5. Send messages and verify run ids. With frequency=2, every second
    #    message (odd 0-based index i) yields exactly one run — hence `i % 2`.
    message_text = [
        "my favorite color is orange",
        "not particularly. today is a good day",
        "actually my favorite color is coral",
        "let's change the subject",
        "actually my fav plant is the the african spear",
        "indeed",
    ]
    run_ids = []
    for i, text in enumerate(message_text):
        response = await server.send_message_to_agent(
            agent_id=main_agent.id,
            actor=actor,
            messages=[
                MessageCreate(
                    role="user",
                    content=text,
                ),
            ],
            stream_steps=False,
            stream_tokens=False,
        )
        assert len(response.messages) > 0
        assert len(response.usage.run_ids or []) == i % 2
        run_ids.extend(response.usage.run_ids or [])
    # Every background run must be attributed to the sleeptime agent and complete.
    jobs = server.job_manager.list_jobs(actor=actor, job_type=JobType.RUN)
    runs = [Run.from_job(job) for job in jobs]
    agent_runs = [run for run in runs if "agent_id" in run.metadata and run.metadata["agent_id"] == sleeptime_agent_id]
    assert len(agent_runs) == len(run_ids)
    for run_id in run_ids:
        job = server.job_manager.get_job_by_id(job_id=run_id, actor=actor)
        assert job.status == JobStatus.completed
    # 6. Delete agent — group and sleeptime agent must be cascaded away.
    server.agent_manager.delete_agent(agent_id=main_agent.id, actor=actor)
    with pytest.raises(NoResultFound):
        server.group_manager.retrieve_group(group_id=group.id, actor=actor)
    with pytest.raises(NoResultFound):
        server.agent_manager.get_agent_by_id(agent_id=sleeptime_agent_id, actor=actor)
@pytest.mark.asyncio
async def test_sleeptime_removes_redundant_information(server, actor):
    """The sleeptime agent should deduplicate the human block (legacy variant
    using ``SleeptimeManager`` with an explicit ``manager_agent_id``).

    The block is seeded with the "fiddle leaf" fact three times; with
    frequency=1 the sleeptime agent runs every turn, and after two messages
    the count of that phrase should have dropped.
    """
    # 1. set up sleep-time agent as in test_sleeptime_group_chat
    server.tool_manager.upsert_base_tools(actor=actor)
    main_agent = server.create_agent(
        request=CreateAgent(
            name="main_agent",
            memory_blocks=[
                CreateBlock(
                    label="persona",
                    value="You are a personal assistant that helps users with requests.",
                ),
                CreateBlock(
                    label="human",
                    # Deliberately redundant: "fiddle leaf" appears three times.
                    value="My favorite plant is the fiddle leaf\nMy favorite dog is the husky\nMy favorite plant is the fiddle leaf\nMy favorite plant is the fiddle leaf",
                ),
            ],
            model="anthropic/claude-3-5-sonnet-20240620",
            embedding="openai/text-embedding-ada-002",
            enable_sleeptime=True,
        ),
        actor=actor,
    )
    # Run the sleeptime agent after every message so dedup happens quickly.
    group = server.group_manager.modify_group(
        group_id=main_agent.multi_agent_group.id,
        group_update=GroupUpdate(
            manager_config=SleeptimeManager(
                manager_agent_id=main_agent.id,
                sleeptime_agent_frequency=1,
            ),
        ),
        actor=actor,
    )
    sleeptime_agent_id = group.agent_ids[0]
    shared_block = server.agent_manager.get_block_with_label(agent_id=main_agent.id, block_label="human", actor=actor)
    count_before_memory_edits = shared_block.value.count("fiddle leaf")
    test_messages = ["hello there", "my favorite bird is the sparrow"]
    for test_message in test_messages:
        _ = await server.send_message_to_agent(
            agent_id=main_agent.id,
            actor=actor,
            messages=[
                MessageCreate(
                    role="user",
                    content=test_message,
                ),
            ],
            stream_steps=False,
            stream_tokens=False,
        )
    # 2. Allow memory blocks time to update (background runs are async)
    time.sleep(5)
    # 3. Check that the memory blocks have been collapsed
    shared_block = server.agent_manager.get_block_with_label(agent_id=main_agent.id, block_label="human", actor=actor)
    count_after_memory_edits = shared_block.value.count("fiddle leaf")
    assert count_after_memory_edits < count_before_memory_edits
    # 4. Delete agent — group and sleeptime agent must be cascaded away.
    server.agent_manager.delete_agent(agent_id=main_agent.id, actor=actor)
    with pytest.raises(NoResultFound):
        server.group_manager.retrieve_group(group_id=group.id, actor=actor)
    with pytest.raises(NoResultFound):
        server.agent_manager.get_agent_by_id(agent_id=sleeptime_agent_id, actor=actor)