feat: Fix test batch sdkpy [LET-4507] (#4917)

* Fix test batch sdk

* Consolidate and fix test batch sdk
This commit is contained in:
Matthew Zhou
2025-09-24 16:00:52 -07:00
committed by Caren Thomas
parent b0bc04fec7
commit d78e0ccb58
5 changed files with 129 additions and 180 deletions

View File

@@ -41,7 +41,6 @@ jobs:
"integration_test_chat_completions.py", "integration_test_chat_completions.py",
"integration_test_multi_agent.py", "integration_test_multi_agent.py",
"integration_test_batch_api_cron_jobs.py", "integration_test_batch_api_cron_jobs.py",
"integration_test_batch_sdk.py",
"integration_test_builtin_tools.py", "integration_test_builtin_tools.py",
"integration_test_turbopuffer.py", "integration_test_turbopuffer.py",
"integration_test_human_in_the_loop.py" "integration_test_human_in_the_loop.py"

View File

@@ -122,7 +122,7 @@ async def list_batches(
""" """
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id) actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
jobs = server.job_manager.list_jobs( jobs = await server.job_manager.list_jobs_async(
actor=actor, actor=actor,
statuses=[JobStatus.created, JobStatus.running], statuses=[JobStatus.created, JobStatus.running],
job_type=JobType.BATCH, job_type=JobType.BATCH,

View File

@@ -1,139 +0,0 @@
import os
import threading
import time
import pytest
from dotenv import load_dotenv
from letta_client import Letta, LettaBatchRequest, MessageCreate, TextContent
from letta.config import LettaConfig
from letta.jobs.llm_batch_job_polling import poll_running_llm_batches
from letta.orm import Base
from letta.schemas.enums import JobStatus
from letta.server.db import db_context
from letta.server.server import SyncServer
@pytest.fixture(autouse=True)
def clear_batch_tables():
    """Truncate batch-related tables so every test starts from a clean slate."""
    batch_tables = {"jobs", "llm_batch_job", "llm_batch_items"}
    with db_context() as session:
        # Reverse topological order so FK-dependent tables are emptied first.
        for table in reversed(Base.metadata.sorted_tables):
            if table.name in batch_tables:
                session.execute(table.delete())
        session.commit()
def run_server():
    """Start the Letta REST server in-process; blocks, so run it on a daemon thread."""
    load_dotenv()

    # Imported lazily so env vars from .env are loaded before app setup runs.
    from letta.server.rest_api.app import start_server

    start_server(debug=True)
@pytest.fixture(scope="session")
def server_url():
"""
Ensures a server is running and returns its base URL.
Uses environment variable if available, otherwise starts a server
in a background thread.
"""
url = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
if not os.getenv("LETTA_SERVER_URL"):
thread = threading.Thread(target=run_server, daemon=True)
thread.start()
time.sleep(5) # Give server time to start
return url
@pytest.fixture(scope="module")
def server():
"""
Creates a SyncServer instance for testing.
Loads and saves config to ensure proper initialization.
"""
config = LettaConfig.load()
config.save()
return SyncServer()
@pytest.fixture(scope="session")
def client(server_url):
"""Creates a REST client connected to the test server."""
return Letta(base_url=server_url)
@pytest.mark.asyncio
async def test_create_batch(client: Letta, server: SyncServer):
    """Exercise the batch lifecycle end-to-end: create -> list -> poll -> retrieve -> cancel."""
    # create one agent per batch request
    agents = [
        client.agents.create(
            name=f"agent{i}_batch",
            memory_blocks=[{"label": "persona", "value": f"you are agent {i}"}],
            model="anthropic/claude-3-7-sonnet-20250219",
            embedding="letta/letta-free",
        )
        for i in (1, 2)
    ]

    # create a run with one "hi" message per agent
    run = client.batches.create(
        requests=[
            LettaBatchRequest(
                messages=[
                    MessageCreate(
                        role="user",
                        content=[TextContent(text="hi")],
                    )
                ],
                agent_id=agent.id,
            )
            for agent in agents
        ]
    )
    assert run is not None

    # list batches: match our batch by id rather than asserting an exact
    # count / relying on ordering — other tests (or prior state) may have
    # created batches too, which made the old `== 1` / `batches[0]`
    # assertions brittle
    batches = client.batches.list()
    assert len(batches) >= 1, f"Expected at least 1 batch, got {len(batches)}"
    our_batch = next((b for b in batches if b.id == run.id), None)
    assert our_batch is not None, "created batch missing from listing"
    assert our_batch.status == JobStatus.running

    # Poll it once so the job machinery advances
    await poll_running_llm_batches(server)

    # get the batch results
    results = client.batches.retrieve(
        batch_id=run.id,
    )
    assert results is not None

    # cancel, then confirm the cancellation is visible on retrieve
    client.batches.cancel(batch_id=run.id)
    batch_job = client.batches.retrieve(
        batch_id=run.id,
    )
    assert batch_job.status == JobStatus.cancelled

View File

@@ -1,7 +1,7 @@
{ {
"agents": [ "agents": [
{ {
"name": "test_export_import_f72735f5-a08b-4ff0-8f8e-761af82dee33", "name": "test_export_import_431ac32f-ffd1-40a7-8152-9733470c951d",
"memory_blocks": [], "memory_blocks": [],
"tools": [], "tools": [],
"tool_ids": [ "tool_ids": [
@@ -20,25 +20,25 @@
"block-2" "block-2"
], ],
"tool_rules": [ "tool_rules": [
{
"tool_name": "conversation_search",
"type": "continue_loop",
"prompt_template": "<tool_rule>\n{{ tool_name }} requires continuing your response when called\n</tool_rule>"
},
{ {
"tool_name": "memory_replace", "tool_name": "memory_replace",
"type": "continue_loop", "type": "continue_loop",
"prompt_template": "<tool_rule>\n{{ tool_name }} requires continuing your response when called\n</tool_rule>" "prompt_template": null
},
{
"tool_name": "memory_insert",
"type": "continue_loop",
"prompt_template": "<tool_rule>\n{{ tool_name }} requires continuing your response when called\n</tool_rule>"
}, },
{ {
"tool_name": "send_message", "tool_name": "send_message",
"type": "exit_loop", "type": "exit_loop",
"prompt_template": "<tool_rule>\n{{ tool_name }} ends your response (yields control) when called\n</tool_rule>" "prompt_template": null
},
{
"tool_name": "memory_insert",
"type": "continue_loop",
"prompt_template": null
},
{
"tool_name": "conversation_search",
"type": "continue_loop",
"prompt_template": null
} }
], ],
"tags": [ "tags": [
@@ -99,6 +99,7 @@
"template": false, "template": false,
"project": null, "project": null,
"tool_exec_environment_variables": {}, "tool_exec_environment_variables": {},
"secrets": null,
"memory_variables": null, "memory_variables": null,
"project_id": null, "project_id": null,
"template_id": null, "template_id": null,
@@ -128,7 +129,7 @@
"content": [ "content": [
{ {
"type": "text", "type": "text",
"text": "You are a helpful assistant specializing in data analysis and mathematical computations.\n\n<memory_blocks>\nThe following memory blocks are currently engaged in your core memory unit:\n\n<project_context>\n<description>\nNone\n</description>\n<metadata>\n- chars_current=210\n- chars_limit=6000\n</metadata>\n<value>\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: Current project: Building predictive models for financial markets. Sarah is working on sequence analysis and pattern recognition. Recently interested in mathematical sequences like Fibonacci for trend analysis.\n</value>\n</project_context>\n\n<human>\n<description>\nThe human block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation.\n</description>\n<metadata>\n- chars_current=175\n- chars_limit=4000\n</metadata>\n<value>\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: username: sarah_researcher\nLine 2: occupation: data scientist\nLine 3: interests: machine learning, statistics, fibonacci sequences\nLine 4: preferred_communication: detailed explanations with examples\n</value>\n</human>\n\n<persona>\n<description>\nThe persona block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions.\n</description>\n<metadata>\n- chars_current=195\n- chars_limit=8000\n</metadata>\n<value>\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: You are Alex, a data analyst and mathematician who helps users with calculations and insights. 
You have extensive experience in statistical analysis and prefer to provide clear, accurate results.\n</value>\n</persona>\n\n</memory_blocks>\n\n<tool_usage_rules>\nThe following constraints define rules for tool usage and guide desired behavior. These rules must be followed to ensure proper tool execution and workflow. A single response may contain multiple tool calls.\n\n<tool_rule>\nconversation_search requires continuing your response when called\n</tool_rule>\n<tool_rule>\nmemory_replace requires continuing your response when called\n</tool_rule>\n<tool_rule>\nmemory_insert requires continuing your response when called\n</tool_rule>\n<tool_rule>\nsend_message ends your response (yields control) when called\n</tool_rule>\n</tool_usage_rules>\n\n\n\n<memory_metadata>\n- The current system date is: September 08, 2025\n- Memory blocks were last modified: 2025-09-08 05:53:05 PM UTC+0000\n- -1 previous messages between you and the user are stored in recall memory (use tools to access them)\n- 2 total memories you created are stored in archival memory (use tools to access them)\n</memory_metadata>" "text": "You are a helpful assistant specializing in data analysis and mathematical computations.\n\n<memory_blocks>\nThe following memory blocks are currently engaged in your core memory unit:\n\n<persona>\n<description>\nThe persona block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions.\n</description>\n<metadata>\n- chars_current=195\n- chars_limit=8000\n</metadata>\n<value>\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: You are Alex, a data analyst and mathematician who helps users with calculations and insights. 
You have extensive experience in statistical analysis and prefer to provide clear, accurate results.\n</value>\n</persona>\n\n<human>\n<description>\nThe human block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation.\n</description>\n<metadata>\n- chars_current=175\n- chars_limit=4000\n</metadata>\n<value>\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: username: sarah_researcher\nLine 2: occupation: data scientist\nLine 3: interests: machine learning, statistics, fibonacci sequences\nLine 4: preferred_communication: detailed explanations with examples\n</value>\n</human>\n\n<project_context>\n<description>\n\n</description>\n<metadata>\n- chars_current=210\n- chars_limit=6000\n</metadata>\n<value>\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: Current project: Building predictive models for financial markets. Sarah is working on sequence analysis and pattern recognition. Recently interested in mathematical sequences like Fibonacci for trend analysis.\n</value>\n</project_context>\n\n</memory_blocks>\n\n<tool_usage_rules>\nThe following constraints define rules for tool usage and guide desired behavior. These rules must be followed to ensure proper tool execution and workflow. 
A single response may contain multiple tool calls.\n\n<tool_rule>\nmemory_replace requires continuing your response when called\n</tool_rule>\n<tool_rule>\nmemory_insert requires continuing your response when called\n</tool_rule>\n<tool_rule>\nconversation_search requires continuing your response when called\n</tool_rule>\n<tool_rule>\nsend_message ends your response (yields control) when called\n</tool_rule>\n</tool_usage_rules>\n\n<memory_metadata>\n- The current system date is: September 24, 2025\n- Memory blocks were last modified: 2025-09-24 10:57:40 PM UTC+0000\n- -1 previous messages between you and the user are stored in recall memory (use tools to access them)\n- 2 total memories you created are stored in archival memory (use tools to access them)\n</memory_metadata>"
} }
], ],
"name": null, "name": null,
@@ -142,7 +143,7 @@
"tool_calls": null, "tool_calls": null,
"tool_call_id": null, "tool_call_id": null,
"tool_returns": [], "tool_returns": [],
"created_at": "2025-09-08T17:53:02.499443+00:00" "created_at": "2025-09-24T22:57:39.493431+00:00"
}, },
{ {
"type": "message", "type": "message",
@@ -163,7 +164,7 @@
"agent_id": "agent-0", "agent_id": "agent-0",
"tool_calls": [ "tool_calls": [
{ {
"id": "c1ec5c60-7dbc-4b53-bfbb-c2af5d5a222f", "id": "f1c5f8b4-c57c-4641-8d58-903248a31f7b",
"function": { "function": {
"arguments": "{\n \"message\": \"More human than human is our motto.\"\n}", "arguments": "{\n \"message\": \"More human than human is our motto.\"\n}",
"name": "send_message" "name": "send_message"
@@ -173,7 +174,7 @@
], ],
"tool_call_id": null, "tool_call_id": null,
"tool_returns": [], "tool_returns": [],
"created_at": "2025-09-08T17:53:02.499491+00:00" "created_at": "2025-09-24T22:57:39.493461+00:00"
}, },
{ {
"type": "message", "type": "message",
@@ -181,7 +182,7 @@
"content": [ "content": [
{ {
"type": "text", "type": "text",
"text": "{\n \"status\": \"OK\",\n \"message\": null,\n \"time\": \"2025-09-08 05:53:02 PM UTC+0000\"\n}" "text": "{\n \"status\": \"OK\",\n \"message\": null,\n \"time\": \"2025-09-24 10:57:39 PM UTC+0000\"\n}"
} }
], ],
"name": "send_message", "name": "send_message",
@@ -193,9 +194,9 @@
"model": "gpt-4.1-mini", "model": "gpt-4.1-mini",
"agent_id": "agent-0", "agent_id": "agent-0",
"tool_calls": null, "tool_calls": null,
"tool_call_id": "c1ec5c60-7dbc-4b53-bfbb-c2af5d5a222f", "tool_call_id": "f1c5f8b4-c57c-4641-8d58-903248a31f7b",
"tool_returns": [], "tool_returns": [],
"created_at": "2025-09-08T17:53:02.499526+00:00" "created_at": "2025-09-24T22:57:39.493485+00:00"
}, },
{ {
"type": "message", "type": "message",
@@ -203,7 +204,7 @@
"content": [ "content": [
{ {
"type": "text", "type": "text",
"text": "{\n \"type\": \"login\",\n \"last_login\": \"Never (first login)\",\n \"time\": \"2025-09-08 05:53:02 PM UTC+0000\"\n}" "text": "{\n \"type\": \"login\",\n \"last_login\": \"Never (first login)\",\n \"time\": \"2025-09-24 10:57:39 PM UTC+0000\"\n}"
} }
], ],
"name": null, "name": null,
@@ -217,7 +218,7 @@
"tool_calls": null, "tool_calls": null,
"tool_call_id": null, "tool_call_id": null,
"tool_returns": [], "tool_returns": [],
"created_at": "2025-09-08T17:53:02.499541+00:00" "created_at": "2025-09-24T22:57:39.493496+00:00"
}, },
{ {
"type": "message", "type": "message",
@@ -239,7 +240,7 @@
"tool_calls": null, "tool_calls": null,
"tool_call_id": null, "tool_call_id": null,
"tool_returns": [], "tool_returns": [],
"created_at": "2025-09-08T17:53:05.017966+00:00" "created_at": "2025-09-24T22:57:40.764277+00:00"
}, },
{ {
"type": "message", "type": "message",
@@ -247,7 +248,7 @@
"content": [ "content": [
{ {
"type": "text", "type": "text",
"text": "Responding to test message for export." "text": "User sent a test message for export functionality verification."
} }
], ],
"name": null, "name": null,
@@ -260,17 +261,17 @@
"agent_id": "agent-0", "agent_id": "agent-0",
"tool_calls": [ "tool_calls": [
{ {
"id": "call_sMVo2qIYhILKKNT0YiOdsU24", "id": "call_pvBdfaX7NpaFcjrhqNCSyg7S",
"function": { "function": {
"arguments": "{\"message\": \"Test message received successfully. Ready to assist you with any questions or tasks.\", \"request_heartbeat\": false}", "arguments": "{\"message\": \"I received your test message successfully. If you need any assistance or want to continue with our project discussions, just let me know!\", \"request_heartbeat\": false}",
"name": "send_message" "name": "send_message"
}, },
"type": "function" "type": "function"
} }
], ],
"tool_call_id": "call_sMVo2qIYhILKKNT0YiOdsU24", "tool_call_id": "call_pvBdfaX7NpaFcjrhqNCSyg7S",
"tool_returns": [], "tool_returns": [],
"created_at": "2025-09-08T17:53:06.600989+00:00" "created_at": "2025-09-24T22:57:41.948407+00:00"
}, },
{ {
"type": "message", "type": "message",
@@ -278,7 +279,7 @@
"content": [ "content": [
{ {
"type": "text", "type": "text",
"text": "{\n \"status\": \"OK\",\n \"message\": \"Sent message successfully.\",\n \"time\": \"2025-09-08 05:53:06 PM UTC+0000\"\n}" "text": "{\n \"status\": \"OK\",\n \"message\": \"Sent message successfully.\",\n \"time\": \"2025-09-24 10:57:41 PM UTC+0000\"\n}"
} }
], ],
"name": "send_message", "name": "send_message",
@@ -290,7 +291,7 @@
"model": "gpt-4.1-mini", "model": "gpt-4.1-mini",
"agent_id": "agent-0", "agent_id": "agent-0",
"tool_calls": null, "tool_calls": null,
"tool_call_id": "call_sMVo2qIYhILKKNT0YiOdsU24", "tool_call_id": "call_pvBdfaX7NpaFcjrhqNCSyg7S",
"tool_returns": [ "tool_returns": [
{ {
"status": "success", "status": "success",
@@ -298,7 +299,7 @@
"stderr": null "stderr": null
} }
], ],
"created_at": "2025-09-08T17:53:06.601180+00:00" "created_at": "2025-09-24T22:57:41.948586+00:00"
} }
], ],
"files_agents": [], "files_agents": [],
@@ -366,7 +367,7 @@
"sources": [], "sources": [],
"tools": [ "tools": [
{ {
"id": "tool-6", "id": "tool-5",
"tool_type": "custom", "tool_type": "custom",
"description": "Analyze data and provide insights.", "description": "Analyze data and provide insights.",
"source_type": "json", "source_type": "json",
@@ -410,7 +411,7 @@
"metadata_": {} "metadata_": {}
}, },
{ {
"id": "tool-2", "id": "tool-1",
"tool_type": "custom", "tool_type": "custom",
"description": "Calculate the nth Fibonacci number.", "description": "Calculate the nth Fibonacci number.",
"source_type": "json", "source_type": "json",
@@ -446,7 +447,7 @@
"metadata_": {} "metadata_": {}
}, },
{ {
"id": "tool-3", "id": "tool-0",
"tool_type": "letta_core", "tool_type": "letta_core",
"description": "Search prior conversation history using hybrid search (text + semantic similarity).\n\nExamples:\n # Search all messages\n conversation_search(query=\"project updates\")\n\n # Search only assistant messages\n conversation_search(query=\"error handling\", roles=[\"assistant\"])\n\n # Search with date range (inclusive of both dates)\n conversation_search(query=\"meetings\", start_date=\"2024-01-15\", end_date=\"2024-01-20\")\n # This includes all messages from Jan 15 00:00:00 through Jan 20 23:59:59\n\n # Search messages from a specific day (inclusive)\n conversation_search(query=\"bug reports\", start_date=\"2024-09-04\", end_date=\"2024-09-04\")\n # This includes ALL messages from September 4, 2024\n\n # Search with specific time boundaries\n conversation_search(query=\"deployment\", start_date=\"2024-01-15T09:00\", end_date=\"2024-01-15T17:30\")\n # This includes messages from 9 AM to 5:30 PM on Jan 15\n\n # Search with limit\n conversation_search(query=\"debugging\", limit=10)\n\n Returns:\n str: Query result string containing matching messages with timestamps and content.", "description": "Search prior conversation history using hybrid search (text + semantic similarity).\n\nExamples:\n # Search all messages\n conversation_search(query=\"project updates\")\n\n # Search only assistant messages\n conversation_search(query=\"error handling\", roles=[\"assistant\"])\n\n # Search with date range (inclusive of both dates)\n conversation_search(query=\"meetings\", start_date=\"2024-01-15\", end_date=\"2024-01-20\")\n # This includes all messages from Jan 15 00:00:00 through Jan 20 23:59:59\n\n # Search messages from a specific day (inclusive)\n conversation_search(query=\"bug reports\", start_date=\"2024-09-04\", end_date=\"2024-09-04\")\n # This includes ALL messages from September 4, 2024\n\n # Search with specific time boundaries\n conversation_search(query=\"deployment\", start_date=\"2024-01-15T09:00\", end_date=\"2024-01-15T17:30\")\n # This 
includes messages from 9 AM to 5:30 PM on Jan 15\n\n # Search with limit\n conversation_search(query=\"debugging\", limit=10)\n\n Returns:\n str: Query result string containing matching messages with timestamps and content.",
"source_type": "python", "source_type": "python",
@@ -505,7 +506,7 @@
"metadata_": {} "metadata_": {}
}, },
{ {
"id": "tool-1", "id": "tool-2",
"tool_type": "custom", "tool_type": "custom",
"description": "Get user preferences for a specific category.", "description": "Get user preferences for a specific category.",
"source_type": "json", "source_type": "json",
@@ -541,7 +542,7 @@
"metadata_": {} "metadata_": {}
}, },
{ {
"id": "tool-0", "id": "tool-3",
"tool_type": "letta_sleeptime_core", "tool_type": "letta_sleeptime_core",
"description": "The memory_insert command allows you to insert text at a specific location in a memory block.\n\nExamples:\n # Update a block containing information about the user (append to the end of the block)\n memory_insert(label=\"customer\", new_str=\"The customer's ticket number is 12345\")\n\n # Update a block containing information about the user (insert at the beginning of the block)\n memory_insert(label=\"customer\", new_str=\"The customer's ticket number is 12345\", insert_line=0)\n\n Returns:\n Optional[str]: None is always returned as this function does not produce a response.", "description": "The memory_insert command allows you to insert text at a specific location in a memory block.\n\nExamples:\n # Update a block containing information about the user (append to the end of the block)\n memory_insert(label=\"customer\", new_str=\"The customer's ticket number is 12345\")\n\n # Update a block containing information about the user (insert at the beginning of the block)\n memory_insert(label=\"customer\", new_str=\"The customer's ticket number is 12345\", insert_line=0)\n\n Returns:\n Optional[str]: None is always returned as this function does not produce a response.",
"source_type": "python", "source_type": "python",
@@ -630,7 +631,7 @@
"metadata_": {} "metadata_": {}
}, },
{ {
"id": "tool-5", "id": "tool-6",
"tool_type": "letta_core", "tool_type": "letta_core",
"description": "Sends a message to the human user.", "description": "Sends a message to the human user.",
"source_type": "python", "source_type": "python",
@@ -667,7 +668,7 @@
], ],
"mcp_servers": [], "mcp_servers": [],
"metadata": { "metadata": {
"revision_id": "5b804970e6a0" "revision_id": "3d2e9fb40a3c"
}, },
"created_at": "2025-09-08T17:53:06.749694+00:00" "created_at": "2025-09-24T22:57:42.392726+00:00"
} }

View File

@@ -13,6 +13,7 @@ from letta_client import (
ContinueToolRule, ContinueToolRule,
CreateBlock, CreateBlock,
Letta as LettaSDKClient, Letta as LettaSDKClient,
LettaBatchRequest,
LettaRequest, LettaRequest,
MaxCountPerStepToolRule, MaxCountPerStepToolRule,
MessageCreate, MessageCreate,
@@ -24,6 +25,10 @@ from letta_client.core import ApiError
from letta_client.types import AgentState, ToolReturnMessage from letta_client.types import AgentState, ToolReturnMessage
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from letta.config import LettaConfig
from letta.jobs.llm_batch_job_polling import poll_running_llm_batches
from letta.schemas.enums import JobStatus
from letta.server.server import SyncServer
from tests.helpers.utils import upload_file_and_wait from tests.helpers.utils import upload_file_and_wait
# Constants # Constants
@@ -60,6 +65,18 @@ def client() -> LettaSDKClient:
yield client yield client
@pytest.fixture(scope="module")
def server():
"""
Creates a SyncServer instance for testing.
Loads and saves config to ensure proper initialization.
"""
config = LettaConfig.load()
config.save()
return SyncServer()
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def agent(client: LettaSDKClient): def agent(client: LettaSDKClient):
agent_state = client.agents.create( agent_state = client.agents.create(
@@ -2190,3 +2207,74 @@ def test_upsert_tools(client: LettaSDKClient):
# Clean up # Clean up
client.tools.delete(tool.id) client.tools.delete(tool.id)
@pytest.mark.asyncio
async def test_create_batch(client: LettaSDKClient, server: SyncServer):
    """Exercise the batch lifecycle end-to-end: create -> list -> poll -> retrieve -> cancel."""
    # create one agent per batch request
    agents = [
        client.agents.create(
            name=f"agent{i}_batch",
            memory_blocks=[{"label": "persona", "value": f"you are agent {i}"}],
            model="anthropic/claude-3-7-sonnet-20250219",
            embedding="letta/letta-free",
        )
        for i in (1, 2)
    ]

    # create a run with one "hi" message per agent
    run = client.batches.create(
        requests=[
            LettaBatchRequest(
                messages=[
                    MessageCreate(
                        role="user",
                        content=[TextContent(text="hi")],
                    )
                ],
                agent_id=agent.id,
            )
            for agent in agents
        ]
    )
    assert run is not None

    # list batches: the count assertion was already relaxed to `>= 1`
    # because other batches may exist, but `batches[0]` still assumed
    # ordering — locate our batch by id instead
    batches = client.batches.list()
    assert len(batches) >= 1, f"Expected 1 or more batches, got {len(batches)}"
    our_batch = next((b for b in batches if b.id == run.id), None)
    assert our_batch is not None, "created batch missing from listing"
    assert our_batch.status == JobStatus.running

    # Poll it once so the job machinery advances
    await poll_running_llm_batches(server)

    # get the batch results
    results = client.batches.retrieve(
        batch_id=run.id,
    )
    assert results is not None

    # cancel, then confirm the cancellation is visible on retrieve
    client.batches.cancel(batch_id=run.id)
    batch_job = client.batches.retrieve(
        batch_id=run.id,
    )
    assert batch_job.status == JobStatus.cancelled