fix: patch AutoGen integration for 0.3.5 + add AutoGen integration tests (#1081)

This commit is contained in:
Charles Packer
2024-03-02 13:45:12 -08:00
committed by GitHub
parent bfea6ed7f4
commit e7f42d1420
6 changed files with 87 additions and 31 deletions

View File

@@ -41,6 +41,7 @@ jobs:
PGVECTOR_TEST_DB_URL: postgresql+pg8000://memgpt:memgpt@localhost:8888/memgpt
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MEMGPT_SERVER_PASS: test_server_token
PYTHONPATH: ${{ github.workspace }}:${{ env.PYTHONPATH }}
run: |
poetry run pytest -s -vv -k "not test_storage and not test_server and not test_openai_client" tests

View File

@@ -14,7 +14,7 @@ from memgpt.models import chat_completion_response
from memgpt.interface import AgentInterface
from memgpt.persistence_manager import LocalStateManager
from memgpt.system import get_login_event, package_function_response, package_summarize_message, get_initial_boot_messages
from memgpt.memory import CoreMemory as InContextMemory, summarize_messages
from memgpt.memory import CoreMemory as InContextMemory, summarize_messages, ArchivalMemory, RecallMemory
from memgpt.llm_api_tools import create, is_context_overflow_error
from memgpt.utils import (
create_random_username,
@@ -45,7 +45,7 @@ from .errors import LLMError
from .functions.functions import USER_FUNCTIONS_DIR, load_all_function_sets
def link_functions(function_schemas):
def link_functions(function_schemas: list):
"""Link function definitions to list of function schemas"""
# need to dynamically link the functions
@@ -93,7 +93,7 @@ def link_functions(function_schemas):
return linked_function_set
def initialize_memory(ai_notes, human_notes):
def initialize_memory(ai_notes: Union[str, None], human_notes: Union[str, None]):
if ai_notes is None:
raise ValueError(ai_notes)
if human_notes is None:
@@ -104,7 +104,14 @@ def initialize_memory(ai_notes, human_notes):
return memory
def construct_system_with_memory(system, memory, memory_edit_timestamp, archival_memory=None, recall_memory=None, include_char_count=True):
def construct_system_with_memory(
system: str,
memory: InContextMemory,
memory_edit_timestamp: str,
archival_memory: ArchivalMemory = None,
recall_memory: RecallMemory = None,
include_char_count: bool = True,
):
full_system_message = "\n".join(
[
system,
@@ -125,13 +132,13 @@ def construct_system_with_memory(system, memory, memory_edit_timestamp, archival
def initialize_message_sequence(
model,
system,
memory,
archival_memory=None,
recall_memory=None,
memory_edit_timestamp=None,
include_initial_boot_message=True,
model: str,
system: str,
memory: InContextMemory,
archival_memory: ArchivalMemory = None,
recall_memory: RecallMemory = None,
memory_edit_timestamp: str = None,
include_initial_boot_message: bool = True,
):
if memory_edit_timestamp is None:
memory_edit_timestamp = get_local_time()

View File

@@ -161,7 +161,7 @@ memgpt_agent.load_and_attach(
)
# Initialize the group chat between the agents
groupchat = autogen.GroupChat(agents=[user_proxy, memgpt_agent], messages=[], max_round=12, speaker_selection_method="round_robin")
groupchat = autogen.GroupChat(agents=[user_proxy, memgpt_agent], messages=[], max_round=3, speaker_selection_method="round_robin")
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
# Begin the group chat with a message from the user

View File

@@ -185,7 +185,7 @@ else:
# Initialize the group chat between the user and two LLM agents (PM and coder)
groupchat = autogen.GroupChat(agents=[user_proxy, pm, coder], messages=[], max_round=12, speaker_selection_method="round_robin")
groupchat = autogen.GroupChat(agents=[user_proxy, pm, coder], messages=[], max_round=3, speaker_selection_method="round_robin")
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
# Begin the group chat with a message from the user

View File

@@ -1,8 +1,10 @@
import uuid
import sys
from typing import Callable, Optional, List, Dict, Union, Any, Tuple
from autogen.agentchat import Agent, ConversableAgent, UserProxyAgent, GroupChat, GroupChatManager
from memgpt.metadata import MetadataStore
from memgpt.agent import Agent as MemGPTAgent
from memgpt.agent import save_agent
from memgpt.autogen.interface import AutoGenInterface
@@ -12,10 +14,10 @@ import memgpt.utils as utils
import memgpt.presets.presets as presets
from memgpt.config import MemGPTConfig
from memgpt.credentials import MemGPTCredentials
from memgpt.cli.cli import attach
from memgpt.cli.cli_load import load_directory, load_webpage, load_index, load_database, load_vector_database
from memgpt.cli.cli_load import load_directory, load_vector_database
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.data_types import AgentState, User, LLMConfig, EmbeddingConfig
from memgpt.utils import get_human_text, get_persona_text
class MemGPTConversableAgent(ConversableAgent):
@@ -332,23 +334,33 @@ def create_autogen_memgpt_agent(
user_id = uuid.UUID(agent_config["user_id"])
user = ms.get_user(user_id=user_id)
agent_state = AgentState(
name=agent_config["name"],
user_id=user_id,
persona=agent_config["persona"],
human=agent_config["human"],
llm_config=llm_config,
embedding_config=embedding_config,
preset=agent_config["preset"],
)
try:
preset = ms.get_preset(preset_name=agent_state.preset, user_id=user_id)
memgpt_agent = presets.create_agent_from_preset(
agent_state=agent_state,
preset=preset,
preset_obj = ms.get_preset(preset_name=agent_config["preset"] if "preset" in agent_config else config.preset, user_id=user.id)
if preset_obj is None:
# create preset records in metadata store
from memgpt.presets.presets import add_default_presets
add_default_presets(user.id, ms)
# try again
preset_obj = ms.get_preset(preset_name=agent_config["preset"] if "preset" in agent_config else config.preset, user_id=user.id)
if preset_obj is None:
print("Couldn't find presets in database, please run `memgpt configure`")
sys.exit(1)
# Overwrite fields in the preset if they were specified
# TODO make sure that the human/persona aren't filenames but actually real values
preset_obj.human = agent_config["human"] if "human" in agent_config else get_human_text(config.human)
preset_obj.persona = agent_config["persona"] if "persona" in agent_config else get_persona_text(config.persona)
memgpt_agent = MemGPTAgent(
interface=interface,
persona_is_file=False,
human_is_file=False,
name=agent_config["name"] if "name" in agent_config else None,
created_by=user.id,
preset=preset_obj,
llm_config=llm_config,
embedding_config=embedding_config,
# gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
first_message_verify_mono=True if (llm_config.model is not None and "gpt-4" in llm_config.model) else False,
)
# Save agent in database immediately after writing
save_agent(agent=memgpt_agent, ms=ms)
@@ -357,7 +369,7 @@ def create_autogen_memgpt_agent(
# After creating the agent, we then need to wrap it in a ConversableAgent so that it can be plugged into AutoGen
autogen_memgpt_agent = MemGPTConversableAgent(
name=agent_state.name,
name=memgpt_agent.agent_state.name,
agent=memgpt_agent,
default_auto_reply=default_auto_reply,
is_termination_msg=is_termination_msg,

View File

@@ -0,0 +1,36 @@
import os
import sys
import subprocess
import pytest
def test_agent_groupchat():
    """Smoke-test the AutoGen group chat example.

    Runs ``memgpt/autogen/examples/agent_groupchat.py`` in a subprocess
    (via ``poetry run``) and asserts that it exits with code 0.
    """
    # Path to the example script, relative to the repository root.
    script_path = "memgpt/autogen/examples/agent_groupchat.py"

    # Resolve the project root (this test file lives one level below it),
    # then point at the inner `memgpt` package directory.
    project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    project_root = os.path.join(project_root, "memgpt")
    print(f"Adding the following to PATH: {project_root}")

    # Prepare the child environment: prepend the project root to PYTHONPATH
    # so the example script can import the memgpt package.
    env = os.environ.copy()
    env["PYTHONPATH"] = f"{project_root}:{env.get('PYTHONPATH', '')}"

    # Run the script and capture stdout/stderr.
    # BUG FIX: `env` was constructed above but never passed to subprocess.run,
    # so the PYTHONPATH setup had no effect on the child process.
    result = subprocess.run(
        ["poetry", "run", "python", script_path],
        capture_output=True,
        text=True,
        env=env,
    )

    # Exit code 0 indicates the script ran to completion.
    assert result.returncode == 0, f"Script exited with code {result.returncode}: {result.stderr}"