This commit is contained in:
Sarah Wooders
2025-03-12 22:42:34 -07:00
9 changed files with 325 additions and 24 deletions

View File

@@ -1,19 +0,0 @@
# Notifies the letta-cloud repository on every push to main by firing a
# repository_dispatch event, unless the commit opts out with "[sync-skip]".
name: Notify Letta Cloud
on:
  push:
    branches:
      - main
jobs:
  notify:
    runs-on: ubuntu-latest
    # Skip the notification when the head commit message contains [sync-skip]
    if: ${{ !contains(github.event.head_commit.message, '[sync-skip]') }}
    steps:
      - name: Trigger repository_dispatch
        # SYNC_PAT must be a PAT with permission to dispatch events on letta-cloud
        run: |
          curl -X POST \
            -H "Authorization: token ${{ secrets.SYNC_PAT }}" \
            -H "Accept: application/vnd.github.v3+json" \
            https://api.github.com/repos/letta-ai/letta-cloud/dispatches \
            -d '{"event_type":"oss-update"}'

View File

@@ -1,6 +1,7 @@
# Fix: `from letta_client import Letta` was imported twice; keep a single
# import and group stdlib before third-party.
from pprint import pprint

from letta_client import Letta

# Local Letta server endpoint (default dev port)
client = Letta(base_url="http://localhost:8283")
mcp_server_name = "everything"

View File

@@ -34,6 +34,19 @@ def resolve_type(annotation: str):
return BUILTIN_TYPES[annotation]
try:
if annotation.startswith("list["):
inner_type = annotation[len("list[") : -1]
resolve_type(inner_type)
return list
elif annotation.startswith("dict["):
inner_types = annotation[len("dict[") : -1]
key_type, value_type = inner_types.split(",")
return dict
elif annotation.startswith("tuple["):
inner_types = annotation[len("tuple[") : -1]
[resolve_type(t.strip()) for t in inner_types.split(",")]
return tuple
parsed = ast.literal_eval(annotation)
if isinstance(parsed, type):
return parsed

View File

@@ -231,13 +231,27 @@ def pydantic_model_to_json_schema(model: Type[BaseModel]) -> dict:
"""
schema = model.model_json_schema()
def clean_property(prop: dict) -> dict:
def clean_property(prop: dict, full_schema: dict) -> dict:
"""Clean up a property schema to match desired format"""
if "description" not in prop:
raise ValueError(f"Property {prop} lacks a 'description' key")
if "type" not in prop and "$ref" in prop:
prop["type"] = "object"
# Handle the case where the property is a $ref to another model
if "$ref" in prop:
# Resolve the reference to the nested model
ref_schema = resolve_ref(prop["$ref"], full_schema)
# Recursively clean the nested model
return {
"type": "object",
**clean_schema(ref_schema, full_schema),
"description": prop["description"],
}
# If it's a regular property with a direct type (e.g., string, number)
return {
"type": "string" if prop["type"] == "string" else prop["type"],
"description": prop["description"],
@@ -286,7 +300,7 @@ def pydantic_model_to_json_schema(model: Type[BaseModel]) -> dict:
"description": prop["description"],
}
else:
properties[name] = clean_property(prop)
properties[name] = clean_property(prop, full_schema)
pydantic_model_schema_dict = {
"type": "object",

View File

@@ -53,6 +53,11 @@ MODEL_LIST = [
"name": "claude-3-opus-20240229",
"context_window": 200000,
},
# latest
{
"name": "claude-3-opus-latest",
"context_window": 200000,
},
## Sonnet
# 3.0
{
@@ -69,11 +74,21 @@ MODEL_LIST = [
"name": "claude-3-5-sonnet-20241022",
"context_window": 200000,
},
# 3.5 latest
{
"name": "claude-3-5-sonnet-latest",
"context_window": 200000,
},
# 3.7
{
"name": "claude-3-7-sonnet-20250219",
"context_window": 200000,
},
# 3.7 latest
{
"name": "claude-3-7-sonnet-latest",
"context_window": 200000,
},
## Haiku
# 3.0
{
@@ -85,6 +100,11 @@ MODEL_LIST = [
"name": "claude-3-5-haiku-20241022",
"context_window": 200000,
},
# 3.5 latest
{
"name": "claude-3-5-haiku-latest",
"context_window": 200000,
},
]
DUMMY_FIRST_USER_MESSAGE = "User initializing bootup sequence."

View File

@@ -6,5 +6,6 @@ AZURE_MODEL_TO_CONTEXT_LENGTH = {
"gpt-35-turbo-0125": 16385,
"gpt-4-0613": 8192,
"gpt-4o-mini-2024-07-18": 128000,
"gpt-4o-mini": 128000,
"gpt-4o": 128000,
}

View File

@@ -214,3 +214,62 @@ def test_coerce_dict_args_non_parseable_list_or_dict():
with pytest.raises(ValueError, match="Failed to coerce argument 'bad_list' to list"):
coerce_dict_args_by_annotations(function_args, annotations)
def test_coerce_dict_args_with_complex_list_annotation():
    """
    A string-encoded list should be coerced when the annotation is a
    parameterized list (e.g. list[int]).
    """
    anns = {"a": "list[int]"}
    raw_args = {"a": "[1, 2, 3]"}
    result = coerce_dict_args_by_annotations(raw_args, anns)
    assert result["a"] == [1, 2, 3]
def test_coerce_dict_args_with_complex_dict_annotation():
    """
    A string-encoded dict should be coerced when the annotation is a
    parameterized dict (e.g. dict[str, int]).
    """
    anns = {"a": "dict[str, int]"}
    raw_args = {"a": '{"x": 1, "y": 2}'}
    result = coerce_dict_args_by_annotations(raw_args, anns)
    assert result["a"] == {"x": 1, "y": 2}
def test_coerce_dict_args_unsupported_complex_annotation():
    """
    If an unsupported complex annotation is used (e.g., a custom class),
    a ValueError should be raised.
    """
    annotations = {"f": "CustomClass[int]"}
    function_args = {"f": "CustomClass(42)"}
    # Fix: the `match` pattern contained `\[` escapes inside a regular
    # string literal, which is an invalid escape sequence (warns today,
    # will be a SyntaxError in future Python). Use a raw string so the
    # escaped brackets reach re.search verbatim.
    with pytest.raises(
        ValueError,
        match=r"Failed to coerce argument 'f' to CustomClass\[int\]: Unsupported annotation: CustomClass\[int\]",
    ):
        coerce_dict_args_by_annotations(function_args, annotations)
def test_coerce_dict_args_with_nested_complex_annotation():
    """
    Nested parameterized annotations (list[dict[str, int]]) should also be
    coerced from their string-encoded form.
    """
    anns = {"a": "list[dict[str, int]]"}
    raw_args = {"a": '[{"x": 1}, {"y": 2}]'}
    result = coerce_dict_args_by_annotations(raw_args, anns)
    assert result["a"] == [{"x": 1}, {"y": 2}]
def test_coerce_dict_args_with_default_arguments():
    """
    Coercion should convert explicitly supplied arguments while leaving a
    pre-populated default value untouched.
    """
    anns = {"a": "int", "b": "str"}
    raw_args = {"a": "42"}
    # Simulate a default being filled in for 'b' before coercion runs
    raw_args.setdefault("b", "hello")
    result = coerce_dict_args_by_annotations(raw_args, anns)
    assert result["a"] == 42
    assert result["b"] == "hello"

View File

@@ -98,3 +98,216 @@ def test_recall(client, agent_obj):
# Conversation search
result = base_functions.conversation_search(agent_obj, "banana")
assert keyword in result
# This test is nondeterministic, so we retry until we get the perfect behavior from the LLM
@retry_until_success(max_attempts=2, sleep_time_seconds=2)
def test_send_message_to_agent(client, agent_obj, other_agent_obj):
    """Verify one agent can relay a secret word to another agent via its tool.

    Checks (a) the receiver saw the word in a SystemMessage and (b) the
    sender's in-context history contains the receiver's "<id> said:" reply.
    """
    secret_word = "banana"

    # Encourage the agent to send a message to the other agent_obj with the secret string
    client.send_message(
        agent_id=agent_obj.agent_state.id,
        role="user",
        message=f"Use your tool to send a message to another agent with id {other_agent_obj.agent_state.id} to share the secret word: {secret_word}!",
    )

    # Conversation search the other agent
    messages = client.get_messages(other_agent_obj.agent_state.id)

    # Check for the presence of system message (scan newest-first, stop at the first one)
    for m in reversed(messages):
        print(f"\n\n {other_agent_obj.agent_state.id} -> {m.model_dump_json(indent=4)}")
        if isinstance(m, SystemMessage):
            assert secret_word in m.content
            break

    # Search the sender agent for the response from another agent
    in_context_messages = agent_obj.agent_manager.get_in_context_messages(agent_id=agent_obj.agent_state.id, actor=agent_obj.user)
    found = False
    target_snippet = f"{other_agent_obj.agent_state.id} said:"

    for m in in_context_messages:
        if target_snippet in m.text:
            found = True
            break

    # Compute the joined string first (index [1:] skips the system prompt)
    joined_messages = "\n".join([m.text for m in in_context_messages[1:]])
    print(f"In context messages of the sender agent (without system):\n\n{joined_messages}")
    if not found:
        raise Exception(f"Was not able to find an instance of the target snippet: {target_snippet}")

    # Test that the agent can still receive messages fine
    response = client.send_message(agent_id=agent_obj.agent_state.id, role="user", message="So what did the other agent say?")
    print(response.messages)
@retry_until_success(max_attempts=2, sleep_time_seconds=2)
def test_send_message_to_agents_with_tags_simple(client):
    """Broadcast a secret word only to agents matching ALL of a tag set.

    Creates 3 non-matching workers (must NOT receive the message), 3
    matching workers, and a manager with the broadcast tool. Asserts the
    tool response covers exactly the matching workers and that each of
    them saw the secret word. Nondeterministic (LLM-driven) — retried.
    """
    worker_tags = ["worker", "user-456"]

    # Clean up first from possibly failed tests
    prev_worker_agents = client.server.agent_manager.list_agents(client.user, tags=worker_tags, match_all_tags=True)
    for agent in prev_worker_agents:
        client.delete_agent(agent.id)

    secret_word = "banana"

    # Create "manager" agent
    send_message_to_agents_matching_all_tags_tool_id = client.get_tool_id(name="send_message_to_agents_matching_all_tags")
    manager_agent_state = client.create_agent(tool_ids=[send_message_to_agents_matching_all_tags_tool_id])
    manager_agent = client.server.load_agent(agent_id=manager_agent_state.id, actor=client.user)

    # Create 3 non-matching worker agents (These should NOT get the message).
    # Fix: these were previously appended to `worker_agents`, which was then
    # rebound to [] below — so they were never deleted during cleanup and
    # leaked across test runs. Track them separately instead.
    non_matching_agents = []
    non_matching_tags = ["worker", "user-123"]
    for _ in range(3):
        worker_agent_state = client.create_agent(include_multi_agent_tools=False, tags=non_matching_tags)
        worker_agent = client.server.load_agent(agent_id=worker_agent_state.id, actor=client.user)
        non_matching_agents.append(worker_agent)

    # Create 3 worker agents that should get the message
    worker_agents = []
    worker_tags = ["worker", "user-456"]
    for _ in range(3):
        worker_agent_state = client.create_agent(include_multi_agent_tools=False, tags=worker_tags)
        worker_agent = client.server.load_agent(agent_id=worker_agent_state.id, actor=client.user)
        worker_agents.append(worker_agent)

    # Encourage the manager to send a message to the other agent_obj with the secret string
    response = client.send_message(
        agent_id=manager_agent.agent_state.id,
        role="user",
        message=f"Send a message to all agents with tags {worker_tags} informing them of the secret word: {secret_word}!",
    )

    for m in response.messages:
        if isinstance(m, ToolReturnMessage):
            # NOTE(review): eval() on tool output is unsafe on untrusted data;
            # tolerated here only because this is trusted local test fixture data.
            tool_response = eval(json.loads(m.tool_return)["message"])
            print(f"\n\nManager agent tool response: \n{tool_response}\n\n")
            # One response per matching worker
            assert len(tool_response) == len(worker_agents)
            # We can break after this, the ToolReturnMessage after is not related
            break

    # Conversation search the worker agents
    for agent in worker_agents:
        messages = client.get_messages(agent.agent_state.id)
        # Check for the presence of system message
        for m in reversed(messages):
            print(f"\n\n {agent.agent_state.id} -> {m.model_dump_json(indent=4)}")
            if isinstance(m, SystemMessage):
                assert secret_word in m.content
                break

    # Test that the agent can still receive messages fine
    response = client.send_message(agent_id=manager_agent.agent_state.id, role="user", message="So what did the other agents say?")
    print("Manager agent followup message: \n\n" + "\n".join([str(m) for m in response.messages]))

    # Clean up agents — including the non-matching workers that previously leaked
    client.delete_agent(manager_agent_state.id)
    for agent in worker_agents + non_matching_agents:
        client.delete_agent(agent.agent_state.id)
# This test is nondeterministic, so we retry until we get the perfect behavior from the LLM
@retry_until_success(max_attempts=2, sleep_time_seconds=2)
def test_send_message_to_agents_with_tags_complex_tool_use(client, roll_dice_tool):
    """Broadcast to tagged workers that must each invoke a tool of their own.

    A manager agent asks every "dice-rollers" worker to roll a dice; the
    manager's tool return must contain one entry per worker.
    """
    worker_tags = ["dice-rollers"]

    # Clean up first from possibly failed tests
    prev_worker_agents = client.server.agent_manager.list_agents(client.user, tags=worker_tags, match_all_tags=True)
    for agent in prev_worker_agents:
        client.delete_agent(agent.id)

    # Create "manager" agent
    send_message_to_agents_matching_all_tags_tool_id = client.get_tool_id(name="send_message_to_agents_matching_all_tags")
    manager_agent_state = client.create_agent(tool_ids=[send_message_to_agents_matching_all_tags_tool_id])
    manager_agent = client.server.load_agent(agent_id=manager_agent_state.id, actor=client.user)

    # Create 3 worker agents
    # NOTE(review): the comment above says 3 but range(2) creates only 2 —
    # confirm which count was intended.
    worker_agents = []
    worker_tags = ["dice-rollers"]
    for _ in range(2):
        worker_agent_state = client.create_agent(include_multi_agent_tools=False, tags=worker_tags, tool_ids=[roll_dice_tool.id])
        worker_agent = client.server.load_agent(agent_id=worker_agent_state.id, actor=client.user)
        worker_agents.append(worker_agent)

    # Encourage the manager to send a message to the other agent_obj with the secret string
    broadcast_message = f"Send a message to all agents with tags {worker_tags} asking them to roll a dice for you!"
    response = client.send_message(
        agent_id=manager_agent.agent_state.id,
        role="user",
        message=broadcast_message,
    )

    for m in response.messages:
        if isinstance(m, ToolReturnMessage):
            # eval() here is only acceptable because the tool return is trusted local test data
            tool_response = eval(json.loads(m.tool_return)["message"])
            print(f"\n\nManager agent tool response: \n{tool_response}\n\n")
            assert len(tool_response) == len(worker_agents)
            # We can break after this, the ToolReturnMessage after is not related
            break

    # Test that the agent can still receive messages fine
    response = client.send_message(agent_id=manager_agent.agent_state.id, role="user", message="So what did the other agents say?")
    print("Manager agent followup message: \n\n" + "\n".join([str(m) for m in response.messages]))

    # Clean up agents
    client.delete_agent(manager_agent_state.id)
    for agent in worker_agents:
        client.delete_agent(agent.agent_state.id)
@retry_until_success(max_attempts=5, sleep_time_seconds=2)
def test_agents_async_simple(client):
    """
    Test two agents with multi-agent tools sending messages back and forth to count to 5.
    The chain is started by prompting one of the agents.

    NOTE(review): the prompt below actually asks for a poem exchange, not
    counting to 5 — the summary above may be stale; confirm intent.
    """
    # Cleanup from potentially failed previous runs
    existing_agents = client.server.agent_manager.list_agents(client.user)
    for agent in existing_agents:
        client.delete_agent(agent.id)

    # Create two agents with multi-agent tools
    send_message_to_agent_async_tool_id = client.get_tool_id(name="send_message_to_agent_async")
    memory_a = ChatMemory(
        human="Chad - I'm interested in hearing poem.",
        persona="You are an AI agent that can communicate with your agent buddy using `send_message_to_agent_async`, who has some great poem ideas (so I've heard).",
    )
    charles_state = client.create_agent(name="charles", memory=memory_a, tool_ids=[send_message_to_agent_async_tool_id])
    charles = client.server.load_agent(agent_id=charles_state.id, actor=client.user)

    memory_b = ChatMemory(
        human="No human - you are to only communicate with the other AI agent.",
        persona="You are an AI agent that can communicate with your agent buddy using `send_message_to_agent_async`, who is interested in great poem ideas.",
    )
    # Sarah's agent is never loaded directly; she is driven by incoming async messages
    sarah_state = client.create_agent(name="sarah", memory=memory_b, tool_ids=[send_message_to_agent_async_tool_id])

    # Start the count chain with Agent1
    initial_prompt = f"I want you to talk to the other agent with ID {sarah_state.id} using `send_message_to_agent_async`. Specifically, I want you to ask him for a poem idea, and then craft a poem for me."
    client.send_message(
        agent_id=charles.agent_state.id,
        role="user",
        message=initial_prompt,
    )

    # Each agent should eventually receive an async "[Incoming message ...]" system message
    found_in_charles = wait_for_incoming_message(
        client=client,
        agent_id=charles_state.id,
        substring="[Incoming message from agent with ID",
        max_wait_seconds=10,
        sleep_interval=0.5,
    )
    assert found_in_charles, "Charles never received the system message from Sarah (timed out)."

    found_in_sarah = wait_for_incoming_message(
        client=client,
        agent_id=sarah_state.id,
        substring="[Incoming message from agent with ID",
        max_wait_seconds=10,
        sleep_interval=0.5,
    )
    assert found_in_sarah, "Sarah never received the system message from Charles (timed out)."

View File

@@ -8,10 +8,9 @@ def adjust_menu_prices(percentage: float) -> str:
str: A formatted string summarizing the price adjustments.
"""
import cowsay
from tqdm import tqdm
from core.menu import Menu, MenuItem # Import a class from the codebase
from core.utils import format_currency # Use a utility function to test imports
from tqdm import tqdm
if not isinstance(percentage, (int, float)):
raise TypeError("percentage must be a number")