fix: Fix memory summarization (#2029)

Matthew Zhou
2024-11-12 18:02:34 -08:00
committed by GitHub
parent e40e60945a
commit c9c10e945e
5 changed files with 67 additions and 21 deletions

View File

@@ -29,49 +29,49 @@ jobs:
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
-          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4_returns_valid_first_message
+          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4o_returns_valid_first_message
      - name: Test model sends message with keyword
        id: test_keyword_message
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
-          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4_returns_keyword
+          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4o_returns_keyword
      - name: Test model uses external tool correctly
        id: test_external_tool
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
-          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4_uses_external_tool
+          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4o_uses_external_tool
      - name: Test model recalls chat memory
        id: test_chat_memory
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
-          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4_recall_chat_memory
+          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4o_recall_chat_memory
      - name: Test model uses 'archival_memory_search' to find secret
        id: test_archival_memory_search
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
-          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4_archival_memory_retrieval
+          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4o_archival_memory_retrieval
      - name: Test model uses 'archival_memory_insert' to insert archival memories
        id: test_archival_memory_insert
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
-          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4_archival_memory_insert
+          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4o_archival_memory_insert
      - name: Test model can edit core memories
        id: test_core_memory
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
-          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4_edit_core_memory
+          poetry run pytest -s -vv tests/test_endpoints.py::test_openai_gpt_4o_edit_core_memory
      - name: Test embedding endpoint
        id: test_embedding_endpoint

View File

@@ -126,6 +126,7 @@ def build_openai_chat_completions_request(
    openai_message_list = [
        cast_message_to_subtype(m.to_openai_dict(put_inner_thoughts_in_kwargs=llm_config.put_inner_thoughts_in_kwargs)) for m in messages
    ]
    if llm_config.model:
        model = llm_config.model
    else:
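
Aside on the flag being threaded through here: as the name suggests, put_inner_thoughts_in_kwargs decides whether the model's inner monologue is folded into the tool-call arguments or left in the message content when a message is serialized for OpenAI. The toy sketch below illustrates the two shapes; it is not Letta's actual to_openai_dict, and the inner_thoughts argument name is an assumption.

import json

def to_openai_dict_sketch(inner_thoughts, tool_name, tool_args, put_inner_thoughts_in_kwargs):
    # Toy illustration only (not Letta's real serializer).
    if put_inner_thoughts_in_kwargs:
        # Monologue rides inside the function-call arguments; content stays empty,
        # so the model is steered toward emitting a tool call rather than plain text.
        args = dict(tool_args, inner_thoughts=inner_thoughts)  # "inner_thoughts" key name is assumed
        return {
            "role": "assistant",
            "content": None,
            "tool_calls": [{"type": "function", "function": {"name": tool_name, "arguments": json.dumps(args)}}],
        }
    # Flag off: the monologue stays in the message content, i.e. a plain-text reply,
    # which is what a summarization request wants back.
    return {
        "role": "assistant",
        "content": inner_thoughts,
        "tool_calls": [{"type": "function", "function": {"name": tool_name, "arguments": json.dumps(tool_args)}}],
    }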

View File

@@ -7,6 +7,7 @@ from letta.embeddings import embedding_model, parse_and_chunk_text, query_embedd
from letta.llm_api.llm_api_tools import create
from letta.prompts.gpt_summarize import SYSTEM as SUMMARY_PROMPT_SYSTEM
from letta.schemas.agent import AgentState
+from letta.schemas.enums import MessageRole
from letta.schemas.memory import Memory
from letta.schemas.message import Message
from letta.schemas.passage import Passage
@@ -50,7 +51,6 @@ def _format_summary_history(message_history: List[Message]):
def summarize_messages(
    agent_state: AgentState,
    message_sequence_to_summarize: List[Message],
-    insert_acknowledgement_assistant_message: bool = True,
):
    """Summarize a message sequence using GPT"""
    # we need the context_window
@@ -70,13 +70,17 @@ def summarize_messages(
    dummy_user_id = agent_state.user_id
    dummy_agent_id = agent_state.id
    message_sequence = []
-    message_sequence.append(Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role="system", text=summary_prompt))
-    if insert_acknowledgement_assistant_message:
-        message_sequence.append(Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role="assistant", text=MESSAGE_SUMMARY_REQUEST_ACK))
-    message_sequence.append(Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role="user", text=summary_input))
+    message_sequence.append(Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role=MessageRole.system, text=summary_prompt))
+    message_sequence.append(
+        Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role=MessageRole.assistant, text=MESSAGE_SUMMARY_REQUEST_ACK)
+    )
+    message_sequence.append(Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role=MessageRole.user, text=summary_input))
+    # TODO: We need to eventually have a separate LLM config for the summarizer LLM
+    llm_config_no_inner_thoughts = agent_state.llm_config.model_copy(deep=True)
+    llm_config_no_inner_thoughts.put_inner_thoughts_in_kwargs = False
    response = create(
-        llm_config=agent_state.llm_config,
+        llm_config=llm_config_no_inner_thoughts,
        user_id=agent_state.user_id,
        messages=message_sequence,
        stream=False,
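
Pieced together, the summarizer call after this change reads roughly as below. This is a condensed sketch reusing the local names from the hunk above (agent_state, summary_prompt, summary_input, dummy_user_id, dummy_agent_id), not a standalone snippet.

# Condensed sketch of summarize_messages after the fix, using names from the hunk above.
llm_config_no_inner_thoughts = agent_state.llm_config.model_copy(deep=True)
llm_config_no_inner_thoughts.put_inner_thoughts_in_kwargs = False  # summarizer expects a plain-text completion

message_sequence = [
    Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role=MessageRole.system, text=summary_prompt),
    Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role=MessageRole.assistant, text=MESSAGE_SUMMARY_REQUEST_ACK),
    Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role=MessageRole.user, text=summary_input),
]

response = create(
    llm_config=llm_config_no_inner_thoughts,  # was agent_state.llm_config before the fix
    user_id=agent_state.user_id,
    messages=message_sequence,
    stream=False,
)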

View File

@@ -38,7 +38,7 @@ from letta.schemas.openai.chat_completion_response import (
    FunctionCall,
    Message,
)
-from letta.utils import get_human_text, get_persona_text
+from letta.utils import get_human_text, get_persona_text, json_dumps
from tests.helpers.utils import cleanup

# Generate uuid for agent name for this example
@@ -321,6 +321,40 @@ def check_agent_edit_core_memory(filename: str) -> LettaResponse:
    return response


+def check_agent_summarize_memory_simple(filename: str) -> LettaResponse:
+    """
+    Checks that the LLM is able to summarize its memory
+    """
+    # Set up client
+    client = create_client()
+    cleanup(client=client, agent_uuid=agent_uuid)
+    agent_state = setup_agent(client, filename)
+    # Send a couple messages
+    friend_name = "Shub"
+    client.user_message(agent_id=agent_state.id, message="Hey, how's it going? What do you think about this whole shindig")
+    client.user_message(agent_id=agent_state.id, message=f"By the way, my friend's name is {friend_name}!")
+    client.user_message(agent_id=agent_state.id, message="Does the number 42 ring a bell?")
+    # Summarize
+    agent = client.server._get_or_load_agent(agent_id=agent_state.id)
+    agent.summarize_messages_inplace()
+    print(f"Summarization succeeded: messages[1] = \n\n{json_dumps(agent.messages[1])}\n")
+    response = client.user_message(agent_id=agent_state.id, message="What is my friend's name?")
+    # Basic checks
+    assert_sanity_checks(response)
+    # Make sure the friend's name was repeated back to me
+    assert_invoked_send_message_with_keyword(response.messages, friend_name)
+    # Make sure some inner monologue is present
+    assert_inner_monologue_is_present_and_valid(response.messages)
+    return response


def run_embedding_endpoint(filename):
    # load JSON file
    config_data = json.load(open(filename, "r"))
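
The summarization helper above prints agent.messages[1] after calling summarize_messages_inplace: index 0 stays the system prompt, and the freshly generated summary is expected to land in the slot right after it, with only the most recent turns kept verbatim. A toy sketch of that shape (illustrative only, not Letta's implementation; the keep-last-three split is an arbitrary choice for the example):

from typing import Callable, Dict, List

def compress_in_place_sketch(messages: List[Dict[str, str]], summarize: Callable[[List[Dict[str, str]]], str]) -> List[Dict[str, str]]:
    # Toy version of in-place summarization (not Letta's summarize_messages_inplace).
    # Assumes messages[0] is the system prompt and there are more than three history turns.
    system_prompt, history = messages[0], messages[1:]
    evicted, kept = history[:-3], history[-3:]  # arbitrary split for illustration
    summary_message = {"role": "user", "content": summarize(evicted)}
    # System prompt keeps index 0; the summary takes index 1; recent turns follow.
    return [system_prompt, summary_message] + kept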

View File

@@ -7,6 +7,7 @@ from tests.helpers.endpoints_helper import (
    check_agent_archival_memory_retrieval,
    check_agent_edit_core_memory,
    check_agent_recall_chat_memory,
+    check_agent_summarize_memory_simple,
    check_agent_uses_external_tool,
    check_first_response_is_valid_for_llm_endpoint,
    check_response_contains_keyword,
@@ -58,14 +59,14 @@ def retry_until_threshold(threshold=0.5, max_attempts=10, sleep_time_seconds=4):
# ======================================================================================================================
# OPENAI TESTS
# ======================================================================================================================


-def test_openai_gpt_4_returns_valid_first_message():
+def test_openai_gpt_4o_returns_valid_first_message():
    filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_first_response_is_valid_for_llm_endpoint(filename)

    # Log out successful response
    print(f"Got successful response from client: \n\n{response}")


-def test_openai_gpt_4_returns_keyword():
+def test_openai_gpt_4o_returns_keyword():
    keyword = "banana"
    filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_response_contains_keyword(filename, keyword=keyword)
@@ -73,41 +74,47 @@ def test_openai_gpt_4_returns_keyword():
print(f"Got successful response from client: \n\n{response}")
def test_openai_gpt_4_uses_external_tool():
def test_openai_gpt_4o_uses_external_tool():
filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
response = check_agent_uses_external_tool(filename)
# Log out successful response
print(f"Got successful response from client: \n\n{response}")
def test_openai_gpt_4_recall_chat_memory():
def test_openai_gpt_4o_recall_chat_memory():
filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
response = check_agent_recall_chat_memory(filename)
# Log out successful response
print(f"Got successful response from client: \n\n{response}")
def test_openai_gpt_4_archival_memory_retrieval():
def test_openai_gpt_4o_archival_memory_retrieval():
filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
response = check_agent_archival_memory_retrieval(filename)
# Log out successful response
print(f"Got successful response from client: \n\n{response}")
def test_openai_gpt_4_archival_memory_insert():
def test_openai_gpt_4o_archival_memory_insert():
filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
response = check_agent_archival_memory_insert(filename)
# Log out successful response
print(f"Got successful response from client: \n\n{response}")
def test_openai_gpt_4_edit_core_memory():
def test_openai_gpt_4o_edit_core_memory():
filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
response = check_agent_edit_core_memory(filename)
# Log out successful response
print(f"Got successful response from client: \n\n{response}")
def test_openai_gpt_4o_summarize_memory():
filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
response = check_agent_summarize_memory_simple(filename)
print(f"Got successful response from client: \n\n{response}")
def test_embedding_endpoint_openai():
filename = os.path.join(embedding_config_dir, "openai_embed.json")
run_embedding_endpoint(filename)