Refactor autogen agent to use sync memgpt, add notebook example (#157)
* Refactor autogen agent to use sync memgpt, add notebook example
* Add colab badge to notebook
* Update colab badge to point to main
* Add imports lost in the merge
* Changes to make autogenagent work with cli refactor
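The core of the refactor is that the AutoGen-facing reply path now drives a synchronous MemGPT `Agent.step()` directly instead of wrapping an async agent in `asyncio.run(...)` (see the `MemGPTAgent` hunks below). A minimal sketch of the calling-convention change, using hypothetical stand-in classes rather than the real `memgpt.agent` classes:

```python
import asyncio


# Hypothetical stand-ins to contrast the two calling conventions; the real agent
# classes live in memgpt.agent and are not reproduced here.
class AsyncStepAgent:
    async def step(self, user_message):
        return f"reply to {user_message!r}"


class SyncStepAgent:
    def step(self, user_message):
        return f"reply to {user_message!r}"


# Before: every synchronous caller (e.g. an AutoGen reply callback) had to own an event loop.
print(asyncio.run(AsyncStepAgent().step("hello")))

# After: the callback can call the agent directly, which is what AutoGen's
# synchronous reply functions expect.
print(SyncStepAgent().step("hello"))
```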
memgpt/autogen/examples/memgpt_coder_autogen.ipynb (new file, 147 additions)
@@ -0,0 +1,147 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "591be0c0-7332-4c57-adcf-fecc578eeb67",
+   "metadata": {},
+   "source": [
+    "<a target=\"_blank\" href=\"https://colab.research.google.com/github/cpacker/MemGPT/blob/main/memgpt/autogen/examples/memgpt_coder_autogen.ipynb\">\n",
+    "  <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n",
+    "</a>"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "43d71a67-3a01-4543-99ad-7dce12d793da",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install pyautogen"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b3754942-819b-4df9-be3f-6cfb3ca101dc",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install pymemgpt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bd6df0ac-66a6-4dc7-9262-4c2ad05fab91",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import openai\n",
+    "openai.api_key=\"YOUR_API_KEY\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0cb9b18c-3662-4206-9ff5-de51a3aafb36",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\"\"\"Example of how to add MemGPT into an AutoGen groupchat\n",
+    "\n",
+    "Based on the official AutoGen example here: https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb\n",
+    "\n",
+    "Begin by doing:\n",
+    "  pip install \"pyautogen[teachable]\"\n",
+    "  pip install pymemgpt\n",
+    "  or\n",
+    "  pip install -e . (inside the MemGPT home directory)\n",
+    "\"\"\"\n",
+    "\n",
+    "import os\n",
+    "import autogen\n",
+    "from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent\n",
+    "\n",
+    "config_list = [\n",
+    "    {\n",
+    "        \"model\": \"gpt-4\",\n",
+    "        \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n",
+    "    },\n",
+    "]\n",
+    "\n",
+    "# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb)\n",
+    "# If USE_MEMGPT is True, then we swap out the \"coder\" agent with a MemGPT agent\n",
+    "USE_MEMGPT = True\n",
+    "# If DEBUG is False, a lot of MemGPT's inner workings output is suppressed and only the final send_message is displayed.\n",
+    "# If DEBUG is True, then all of MemGPT's inner workings (function calls, etc.) will be output.\n",
+    "DEBUG = False\n",
+    "\n",
+    "llm_config = {\"config_list\": config_list, \"seed\": 42}\n",
+    "\n",
+    "# The user agent\n",
+    "user_proxy = autogen.UserProxyAgent(\n",
+    "    name=\"User_proxy\",\n",
+    "    system_message=\"A human admin.\",\n",
+    "    code_execution_config={\"last_n_messages\": 2, \"work_dir\": \"groupchat\"},\n",
+    "    human_input_mode=\"TERMINATE\", # needed?\n",
+    ")\n",
+    "\n",
+    "# The agent playing the role of the product manager (PM)\n",
+    "pm = autogen.AssistantAgent(\n",
+    "    name=\"Product_manager\",\n",
+    "    system_message=\"Creative in software product ideas.\",\n",
+    "    llm_config=llm_config,\n",
+    ")\n",
+    "\n",
+    "if not USE_MEMGPT:\n",
+    "    # In the AutoGen example, we create an AssistantAgent to play the role of the coder\n",
+    "    coder = autogen.AssistantAgent(\n",
+    "        name=\"Coder\",\n",
+    "        llm_config=llm_config,\n",
+    "    )\n",
+    "\n",
+    "else:\n",
+    "    # In our example, we swap this AutoGen agent with a MemGPT agent\n",
+    "    # This MemGPT agent will have all the benefits of MemGPT, ie persistent memory, etc.\n",
+    "    coder = create_autogen_memgpt_agent(\n",
+    "        \"MemGPT_coder\",\n",
+    "        persona_description=\"I am a 10x engineer, trained in Python. I was the first engineer at Uber (which I make sure to tell everyone I work with).\",\n",
+    "        user_description=f\"You are participating in a group chat with a user ({user_proxy.name}) and a product manager ({pm.name}).\",\n",
+    "        interface_kwargs={\"debug\": DEBUG},\n",
+    "    )\n",
+    "\n",
+    "# Initialize the group chat between the user and two LLM agents (PM and coder)\n",
+    "groupchat = autogen.GroupChat(agents=[user_proxy, pm, coder], messages=[], max_round=12)\n",
+    "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)\n",
+    "\n",
+    "# Begin the group chat with a message from the user\n",
+    "user_proxy.initiate_chat(\n",
+    "    manager,\n",
+    "    message=\"I want to design an app to make me one million dollars in one month. Yes, your heard that right.\",\n",
+    ")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
@@ -14,22 +14,22 @@ class DummyInterface(object):
     def set_message_list(self, message_list):
         pass

-    async def internal_monologue(self, msg):
+    def internal_monologue(self, msg):
         pass

-    async def assistant_message(self, msg):
+    def assistant_message(self, msg):
         pass

-    async def memory_message(self, msg):
+    def memory_message(self, msg):
         pass

-    async def system_message(self, msg):
+    def system_message(self, msg):
         pass

-    async def user_message(self, msg, raw=False):
+    def user_message(self, msg, raw=False):
         pass

-    async def function_message(self, msg):
+    def function_message(self, msg):
         pass
@@ -62,7 +62,7 @@ class AutoGenInterface(object):
         """Clears the buffer. Call before every agent.step() when using MemGPT+AutoGen"""
         self.message_list = []

-    async def internal_monologue(self, msg):
+    def internal_monologue(self, msg):
         # ANSI escape code for italic is '\x1B[3m'
         if self.debug:
             print(f"inner thoughts :: {msg}")
@@ -71,25 +71,25 @@ class AutoGenInterface(object):
         message = f"\x1B[3m{Fore.LIGHTBLACK_EX}💭 {msg}{Style.RESET_ALL}" if self.fancy else f"[inner thoughts] {msg}"
         self.message_list.append(message)

-    async def assistant_message(self, msg):
+    def assistant_message(self, msg):
         if self.debug:
             print(f"assistant :: {msg}")
         message = f"{Fore.YELLOW}{Style.BRIGHT}🤖 {Fore.YELLOW}{msg}{Style.RESET_ALL}" if self.fancy else msg
         self.message_list.append(message)

-    async def memory_message(self, msg):
+    def memory_message(self, msg):
         if self.debug:
             print(f"memory :: {msg}")
         message = f"{Fore.LIGHTMAGENTA_EX}{Style.BRIGHT}🧠 {Fore.LIGHTMAGENTA_EX}{msg}{Style.RESET_ALL}" if self.fancy else f"[memory] {msg}"
         self.message_list.append(message)

-    async def system_message(self, msg):
+    def system_message(self, msg):
         if self.debug:
             print(f"system :: {msg}")
         message = f"{Fore.MAGENTA}{Style.BRIGHT}🖥️ [system] {Fore.MAGENTA}{msg}{Style.RESET_ALL}" if self.fancy else f"[system] {msg}"
         self.message_list.append(message)

-    async def user_message(self, msg, raw=False):
+    def user_message(self, msg, raw=False):
         if self.debug:
             print(f"user :: {msg}")
         if not self.show_user_message:
@@ -126,7 +126,7 @@ class AutoGenInterface(object):

         self.message_list.append(message)

-    async def function_message(self, msg):
+    def function_message(self, msg):
         if self.debug:
             print(f"function :: {msg}")
         if not self.show_function_outputs:
@@ -1,5 +1,5 @@
 from autogen.agentchat import Agent, ConversableAgent, UserProxyAgent, GroupChat, GroupChatManager
-from ..agent import AgentAsync
+from ..agent import Agent as _Agent

 import asyncio
 from typing import Callable, Optional, List, Dict, Union, Any, Tuple
@@ -11,6 +11,7 @@ from .. import constants
 from .. import presets
 from ..personas import personas
 from ..humans import humans
+from ..config import AgentConfig


 def create_memgpt_autogen_agent_from_config(
@@ -86,7 +87,7 @@ def create_memgpt_autogen_agent_from_config(

 def create_autogen_memgpt_agent(
     autogen_name,
-    preset=presets.DEFAULT_PRESET,
+    preset=presets.SYNC_CHAT,
     model=constants.DEFAULT_MEMGPT_MODEL,
     persona_description=personas.DEFAULT,
     user_description=humans.DEFAULT,
@@ -112,8 +113,16 @@ def create_autogen_memgpt_agent(
     interface = AutoGenInterface(**interface_kwargs) if interface is None else interface
     persistence_manager = InMemoryStateManager(**persistence_manager_kwargs) if persistence_manager is None else persistence_manager

+    agent_config = AgentConfig(
+        persona=persona_description,
+        human=user_description,
+        model=model,
+        preset=presets.SYNC_CHAT,
+    )
+
     memgpt_agent = presets.use_preset(
         preset,
+        agent_config,
         model,
         persona_description,
         user_description,
@@ -133,7 +142,7 @@ class MemGPTAgent(ConversableAgent):
     def __init__(
         self,
         name: str,
-        agent: AgentAsync,
+        agent: _Agent,
         skip_verify=False,
         concat_other_agent_messages=False,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
@@ -142,7 +151,6 @@ class MemGPTAgent(ConversableAgent):
         self.agent = agent
         self.skip_verify = skip_verify
         self.concat_other_agent_messages = concat_other_agent_messages
-        self.register_reply([Agent, None], MemGPTAgent._a_generate_reply_for_user_message)
+        self.register_reply([Agent, None], MemGPTAgent._generate_reply_for_user_message)
         self.messages_processed_up_to_idx = 0
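For readers unfamiliar with AutoGen's reply mechanism: `register_reply([Agent, None], ...)` installs a callback that the group chat invokes with `(messages, sender, config)` and that must return `(final, reply)`, which is why the MemGPT agent can now expose a plain synchronous function here. A minimal sketch of that pattern with a stand-alone `ConversableAgent` (the `EchoAgent` class is hypothetical and only illustrates the registration shape; it assumes `pyautogen` is installed):

```python
from typing import Any, Dict, List, Optional, Tuple, Union

from autogen.agentchat import Agent, ConversableAgent


class EchoAgent(ConversableAgent):
    def __init__(self, name: str):
        # llm_config=False disables LLM calls; this agent only uses the registered reply.
        super().__init__(name=name, llm_config=False, human_input_mode="NEVER")
        # Same registration shape as MemGPTAgent above: a synchronous reply function
        # triggered for messages from any Agent (or None).
        self.register_reply([Agent, None], EchoAgent._generate_echo_reply)

    def _generate_echo_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[str, Dict, None]]:
        # Echo the last message back and mark the reply as final.
        last = messages[-1]["content"] if messages else ""
        return True, f"echo: {last}"
```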
@@ -171,14 +179,6 @@ class MemGPTAgent(ConversableAgent):
         messages: Optional[List[Dict]] = None,
         sender: Optional[Agent] = None,
         config: Optional[Any] = None,
     ) -> Tuple[bool, Union[str, Dict, None]]:
-        return asyncio.run(self._a_generate_reply_for_user_message(messages=messages, sender=sender, config=config))
-
-    async def _a_generate_reply_for_user_message(
-        self,
-        messages: Optional[List[Dict]] = None,
-        sender: Optional[Agent] = None,
-        config: Optional[Any] = None,
-    ) -> Tuple[bool, Union[str, Dict, None]]:
         self.agent.interface.reset_message_list()
@@ -206,7 +206,7 @@ class MemGPTAgent(ConversableAgent):
                 heartbeat_request,
                 function_failed,
                 token_warning,
-            ) = await self.agent.step(user_message, first_message=False, skip_verify=self.skip_verify)
+            ) = self.agent.step(user_message, first_message=False, skip_verify=self.skip_verify)
             # Skip user inputs if there's a memory warning, function execution failed, or the agent asked for control
             if token_warning:
                 user_message = system.get_token_limit_warning()
@@ -225,6 +225,7 @@ class MemGPTAgent(ConversableAgent):
         pretty_ret = MemGPTAgent.pretty_concat(self.agent.interface.message_list)
         self.messages_processed_up_to_idx += len(new_messages)
         return True, pretty_ret
+        return asyncio.run(self._a_generate_reply_for_user_message(messages=messages, sender=sender, config=config))

     @staticmethod
     def pretty_concat(messages):
@@ -4,11 +4,13 @@ from .prompts import gpt_system
 DEFAULT_PRESET = "memgpt_chat"
 preset_options = [DEFAULT_PRESET]

+SYNC_CHAT = "memgpt_chat_sync" # TODO: remove me after we move the CLI to AgentSync
+

 def use_preset(preset_name, agent_config, model, persona, human, interface, persistence_manager):
     """Storing combinations of SYSTEM + FUNCTION prompts"""

-    from memgpt.agent import AgentAsync
+    from memgpt.agent import AgentAsync, Agent
     from memgpt.utils import printd

     if preset_name == DEFAULT_PRESET:
@@ -43,5 +45,37 @@ def use_preset(preset_name, agent_config, model, persona, human, interface, persistence_manager):
             first_message_verify_mono=True if "gpt-4" in model else False,
         )

+    if preset_name == "memgpt_chat_sync": # TODO: remove me after we move the CLI to AgentSync
+        functions = [
+            "send_message",
+            "pause_heartbeats",
+            "core_memory_append",
+            "core_memory_replace",
+            "conversation_search",
+            "conversation_search_date",
+            "archival_memory_insert",
+            "archival_memory_search",
+        ]
+        available_functions = [v for k, v in gpt_functions.FUNCTIONS_CHAINING.items() if k in functions]
+        printd(f"Available functions:\n", [x["name"] for x in available_functions])
+        assert len(functions) == len(available_functions)
+
+        if "gpt-3.5" in model:
+            # use a different system message for gpt-3.5
+            preset_name = "memgpt_gpt35_extralong"
+
+        return Agent(
+            config=agent_config,
+            model=model,
+            system=gpt_system.get_system_text(DEFAULT_PRESET),
+            functions=available_functions,
+            interface=interface,
+            persistence_manager=persistence_manager,
+            persona_notes=persona,
+            human_notes=human,
+            # gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
+            first_message_verify_mono=True if "gpt-4" in model else False,
+        )
+
     else:
         raise ValueError(preset_name)
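Putting the presets change together with the new `create_autogen_memgpt_agent` body, constructing a synchronous MemGPT agent by hand would look roughly like the sketch below. This mirrors the hunks above rather than quoting them; the `AutoGenInterface` and `InMemoryStateManager` import paths are assumptions (the diff only shows the class names), and both are constructed with their defaults here:

```python
from memgpt import constants, presets
from memgpt.config import AgentConfig
from memgpt.personas import personas
from memgpt.humans import humans
from memgpt.autogen.interface import AutoGenInterface  # assumed module path
from memgpt.persistence_manager import InMemoryStateManager  # assumed module path

model = constants.DEFAULT_MEMGPT_MODEL
persona_description = personas.DEFAULT
user_description = humans.DEFAULT

# Build an AgentConfig pinned to the synchronous preset, as the new
# create_autogen_memgpt_agent() body does.
agent_config = AgentConfig(
    persona=persona_description,
    human=user_description,
    model=model,
    preset=presets.SYNC_CHAT,
)

# use_preset() with SYNC_CHAT returns the synchronous Agent, which MemGPTAgent
# can then wrap for use inside an AutoGen group chat.
memgpt_agent = presets.use_preset(
    presets.SYNC_CHAT,
    agent_config,
    model,
    persona_description,
    user_description,
    AutoGenInterface(),
    InMemoryStateManager(),
)
```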