test: clean up send message tests (#2700)
This commit is contained in:
@@ -13,9 +13,6 @@ from letta.settings import tool_settings
|
||||
def pytest_configure(config):
    """Pytest hook: set up debug logging and register custom markers before collection."""
    logging.basicConfig(level=logging.DEBUG)

    # Register custom markers
    config.addinivalue_line("markers", "async_client_test: mark test as an async client test that is skipped by default")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def disable_e2b_api_key() -> Generator[None, None, None]:
|
||||
|
||||
@@ -420,246 +420,6 @@ def test_tool_call(
|
||||
assert_tool_call_response(messages_from_db, from_db=True)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_greeting_with_assistant_message_async_client(
    disable_e2b_api_key: Any,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Send a forced-reply greeting through the asynchronous client.

    Switches the agent to the parametrized LLM config, then checks that the
    returned messages follow the expected assistant-message sequence.
    """
    updated = await async_client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    reply = await async_client.agents.messages.create(
        agent_id=updated.id,
        messages=USER_MESSAGE_FORCE_REPLY,
    )
    assert_greeting_with_assistant_message_response(reply.messages)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_greeting_without_assistant_message_async_client(
    disable_e2b_api_key: Any,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Send a forced-reply greeting through the asynchronous client with
    `use_assistant_message` disabled.

    Verifies the raw (non-assistant-message) response sequence.
    """
    updated = await async_client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    reply = await async_client.agents.messages.create(
        agent_id=updated.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        use_assistant_message=False,
    )
    assert_greeting_without_assistant_message_response(reply.messages)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_tool_call_async_client(
    disable_e2b_api_key: Any,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Trigger a tool call through the asynchronous client.

    Attaches the dice-rolling tool, switches the LLM config, sends the
    roll-dice prompt, and checks the tool-call response sequence.
    """
    tool = await async_client.tools.upsert_from_function(func=roll_dice)
    state = await async_client.agents.tools.attach(agent_id=agent_state.id, tool_id=tool.id)
    state = await async_client.agents.modify(agent_id=state.id, llm_config=llm_config)
    reply = await async_client.agents.messages.create(
        agent_id=state.id,
        messages=USER_MESSAGE_ROLL_DICE,
    )
    assert_tool_call_response(reply.messages)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
def test_streaming_greeting_with_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Stream a forced-reply greeting with the synchronous client.

    Accumulates the streamed chunks and validates the resulting
    assistant-message sequence.
    """
    updated = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    stream = client.agents.messages.create_stream(
        agent_id=updated.id,
        messages=USER_MESSAGE_FORCE_REPLY,
    )
    collected = list(stream)
    assembled = accumulate_chunks(collected)
    assert_greeting_with_assistant_message_response(assembled, streaming=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
def test_streaming_greeting_without_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Stream a forced-reply greeting with the synchronous client and
    `use_assistant_message` disabled.

    Accumulates the streamed chunks and validates the raw message sequence.
    """
    updated = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    stream = client.agents.messages.create_stream(
        agent_id=updated.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        use_assistant_message=False,
    )
    collected = list(stream)
    assembled = accumulate_chunks(collected)
    assert_greeting_without_assistant_message_response(assembled, streaming=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
def test_streaming_tool_call(
    disable_e2b_api_key: Any,
    client: Letta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Stream a tool-call interaction with the synchronous client.

    Attaches the dice-rolling tool, streams the roll-dice prompt, and
    validates the accumulated tool-call message sequence.
    """
    tool = client.tools.upsert_from_function(func=roll_dice)
    state = client.agents.tools.attach(agent_id=agent_state.id, tool_id=tool.id)
    state = client.agents.modify(agent_id=state.id, llm_config=llm_config)
    stream = client.agents.messages.create_stream(
        agent_id=state.id,
        messages=USER_MESSAGE_ROLL_DICE,
    )
    collected = list(stream)
    assembled = accumulate_chunks(collected)
    assert_tool_call_response(assembled, streaming=True)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_streaming_greeting_with_assistant_message_async_client(
    disable_e2b_api_key: Any,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Stream a forced-reply greeting with the asynchronous client.

    Drains the async stream, accumulates the chunks, and validates the
    assistant-message sequence.
    """
    await async_client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    stream = async_client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
    )
    collected = []
    async for piece in stream:
        collected.append(piece)
    assembled = accumulate_chunks(collected)
    assert_greeting_with_assistant_message_response(assembled, streaming=True)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_streaming_greeting_without_assistant_message_async_client(
    disable_e2b_api_key: Any,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Stream a forced-reply greeting with the asynchronous client and
    `use_assistant_message` disabled.

    Drains the async stream, accumulates the chunks, and validates the raw
    message sequence.
    """
    await async_client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    stream = async_client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        use_assistant_message=False,
    )
    collected = []
    async for piece in stream:
        collected.append(piece)
    assembled = accumulate_chunks(collected)
    assert_greeting_without_assistant_message_response(assembled, streaming=True)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_streaming_tool_call_async_client(
    disable_e2b_api_key: Any,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Stream a tool-call interaction with the asynchronous client.

    Attaches the dice-rolling tool, drains the async stream, and validates
    the accumulated tool-call message sequence.
    """
    tool = await async_client.tools.upsert_from_function(func=roll_dice)
    state = await async_client.agents.tools.attach(agent_id=agent_state.id, tool_id=tool.id)
    state = await async_client.agents.modify(agent_id=state.id, llm_config=llm_config)
    stream = async_client.agents.messages.create_stream(
        agent_id=state.id,
        messages=USER_MESSAGE_ROLL_DICE,
    )
    collected = []
    async for piece in stream:
        collected.append(piece)
    assembled = accumulate_chunks(collected)
    assert_tool_call_response(assembled, streaming=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"llm_config",
|
||||
TESTED_LLM_CONFIGS,
|
||||
@@ -675,16 +435,76 @@ def test_step_streaming_greeting_with_assistant_message(
|
||||
Tests sending a streaming message with a synchronous client.
|
||||
Checks that each chunk in the stream has the correct message types.
|
||||
"""
|
||||
last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
|
||||
agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
|
||||
response = client.agents.messages.create_stream(
|
||||
agent_id=agent_state.id,
|
||||
messages=USER_MESSAGE_FORCE_REPLY,
|
||||
stream_tokens=False,
|
||||
)
|
||||
messages = []
|
||||
for message in response:
|
||||
messages.append(message)
|
||||
chunks = list(response)
|
||||
messages = accumulate_chunks(chunks)
|
||||
assert_greeting_with_assistant_message_response(messages, streaming=True)
|
||||
messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
|
||||
assert_greeting_with_assistant_message_response(messages_from_db, from_db=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
def test_step_streaming_greeting_without_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Step-stream a forced-reply greeting with the synchronous client and
    `use_assistant_message` disabled.

    Validates both the streamed message sequence and the messages persisted
    to the database after the baseline message.
    """
    # Capture the latest message first so the DB check below only sees new ones.
    baseline = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    updated = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    stream = client.agents.messages.create_stream(
        agent_id=updated.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        use_assistant_message=False,
    )
    collected = list(stream)
    assembled = accumulate_chunks(collected)
    assert_greeting_without_assistant_message_response(assembled, streaming=True)
    persisted = client.agents.messages.list(agent_id=updated.id, after=baseline[0].id, use_assistant_message=False)
    assert_greeting_without_assistant_message_response(persisted, from_db=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
def test_step_streaming_tool_call(
    disable_e2b_api_key: Any,
    client: Letta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Step-stream a tool-call interaction with the synchronous client.

    Attaches the dice-rolling tool, validates the streamed tool-call
    sequence, and re-validates the messages persisted to the database.
    """
    tool = client.tools.upsert_from_function(func=roll_dice)
    state = client.agents.tools.attach(agent_id=agent_state.id, tool_id=tool.id)
    # Capture the latest message first so the DB check below only sees new ones.
    baseline = client.agents.messages.list(agent_id=state.id, limit=1)
    state = client.agents.modify(agent_id=state.id, llm_config=llm_config)
    stream = client.agents.messages.create_stream(
        agent_id=state.id,
        messages=USER_MESSAGE_ROLL_DICE,
    )
    collected = list(stream)
    assembled = accumulate_chunks(collected)
    assert_tool_call_response(assembled, streaming=True)
    persisted = client.agents.messages.list(agent_id=state.id, after=baseline[0].id)
    assert_tool_call_response(persisted, from_db=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -702,6 +522,7 @@ def test_token_streaming_greeting_with_assistant_message(
|
||||
Tests sending a streaming message with a synchronous client.
|
||||
Checks that each chunk in the stream has the correct message types.
|
||||
"""
|
||||
last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
|
||||
agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
|
||||
response = client.agents.messages.create_stream(
|
||||
agent_id=agent_state.id,
|
||||
@@ -711,6 +532,8 @@ def test_token_streaming_greeting_with_assistant_message(
|
||||
chunks = list(response)
|
||||
messages = accumulate_chunks(chunks)
|
||||
assert_greeting_with_assistant_message_response(messages, streaming=True, token_streaming=True)
|
||||
messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
|
||||
assert_greeting_with_assistant_message_response(messages_from_db, from_db=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -728,6 +551,7 @@ def test_token_streaming_greeting_without_assistant_message(
|
||||
Tests sending a streaming message with a synchronous client.
|
||||
Checks that each chunk in the stream has the correct message types.
|
||||
"""
|
||||
last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
|
||||
agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
|
||||
response = client.agents.messages.create_stream(
|
||||
agent_id=agent_state.id,
|
||||
@@ -738,6 +562,8 @@ def test_token_streaming_greeting_without_assistant_message(
|
||||
chunks = list(response)
|
||||
messages = accumulate_chunks(chunks)
|
||||
assert_greeting_without_assistant_message_response(messages, streaming=True, token_streaming=True)
|
||||
messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id, use_assistant_message=False)
|
||||
assert_greeting_without_assistant_message_response(messages_from_db, from_db=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -757,6 +583,7 @@ def test_token_streaming_tool_call(
|
||||
"""
|
||||
dice_tool = client.tools.upsert_from_function(func=roll_dice)
|
||||
agent_state = client.agents.tools.attach(agent_id=agent_state.id, tool_id=dice_tool.id)
|
||||
last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
|
||||
agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
|
||||
response = client.agents.messages.create_stream(
|
||||
agent_id=agent_state.id,
|
||||
@@ -766,93 +593,8 @@ def test_token_streaming_tool_call(
|
||||
chunks = list(response)
|
||||
messages = accumulate_chunks(chunks)
|
||||
assert_tool_call_response(messages, streaming=True)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_token_streaming_greeting_with_assistant_message_async_client(
    disable_e2b_api_key: Any,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Token-stream a forced-reply greeting with the asynchronous client.

    Drains the token stream, accumulates the chunks, and validates the
    assistant-message sequence.
    """
    await async_client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    stream = async_client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        stream_tokens=True,
    )
    collected = []
    async for piece in stream:
        collected.append(piece)
    assembled = accumulate_chunks(collected)
    assert_greeting_with_assistant_message_response(assembled, streaming=True)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_token_streaming_greeting_without_assistant_message_async_client(
    disable_e2b_api_key: Any,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Token-stream a forced-reply greeting with the asynchronous client and
    `use_assistant_message` disabled.

    Drains the token stream, accumulates the chunks, and validates the raw
    message sequence.
    """
    await async_client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    stream = async_client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        use_assistant_message=False,
        stream_tokens=True,
    )
    collected = []
    async for piece in stream:
        collected.append(piece)
    assembled = accumulate_chunks(collected)
    assert_greeting_without_assistant_message_response(assembled, streaming=True)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_token_streaming_tool_call_async_client(
    disable_e2b_api_key: Any,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Tests sending a token-streaming message with an asynchronous client.

    Attaches the dice-rolling tool, drains the token stream, and validates
    that the accumulated chunks form the expected tool-call sequence.
    """
    dice_tool = await async_client.tools.upsert_from_function(func=roll_dice)
    agent_state = await async_client.agents.tools.attach(agent_id=agent_state.id, tool_id=dice_tool.id)
    agent_state = await async_client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = async_client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_ROLL_DICE,
        stream_tokens=True,
    )
    chunks = [chunk async for chunk in response]
    messages = accumulate_chunks(chunks)
    assert_tool_call_response(messages, streaming=True)
    # Fix: the previous tail verified persisted messages via the *sync* `client`
    # fixture and a `last_message` baseline, but neither name is in scope in this
    # async test (guaranteed NameError). The DB re-check is dropped until a
    # baseline is captured with the async client.
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -885,47 +627,12 @@ def test_async_greeting_with_assistant_message(
|
||||
assert_tool_response_dict_messages(messages)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.async_client_test
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
async def test_async_greeting_with_assistant_message_async_client(
    disable_e2b_api_key: Any,
    client: Letta,
    async_client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """
    Submit a message as an asynchronous job via the asynchronous client.

    Polls the run to completion with the synchronous client, then verifies
    the messages recorded in the run's result metadata.
    """
    await async_client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)

    run = await async_client.agents.messages.create_async(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
    )
    # Use the synchronous client to check job completion
    run = wait_for_run_completion(client, run.id)

    outcome = run.metadata.get("result")
    assert outcome is not None, "Run metadata missing 'result' key"

    assert_tool_response_dict_messages(outcome["messages"])
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.parametrize(
|
||||
"llm_config",
|
||||
TESTED_LLM_CONFIGS,
|
||||
ids=[c.model for c in TESTED_LLM_CONFIGS],
|
||||
)
|
||||
async def test_auto_summarize(disable_e2b_api_key: Any, client: Letta, llm_config: LLMConfig):
|
||||
def test_auto_summarize(disable_e2b_api_key: Any, client: Letta, llm_config: LLMConfig):
|
||||
"""Test that summarization is automatically triggered."""
|
||||
llm_config.context_window = 3000
|
||||
client.tools.upsert_base_tools()
|
||||
|
||||
@@ -11,6 +11,3 @@ markers =
|
||||
anthropic_basic: Tests for Anthropic endpoints
|
||||
azure_basic: Tests for Azure endpoints
|
||||
gemini_basic: Tests for Gemini endpoints
|
||||
async_client_test: mark test as an async client test that is skipped by default
|
||||
|
||||
addopts = -m "not async_client_test"
|
||||
|
||||
Reference in New Issue
Block a user