diff --git a/memgpt/autogen/examples/agent_autoreply.py b/memgpt/autogen/examples/agent_autoreply.py new file mode 100644 index 00000000..e4cf9cc0 --- /dev/null +++ b/memgpt/autogen/examples/agent_autoreply.py @@ -0,0 +1,66 @@ +"""Example of how to add MemGPT into an AutoGen groupchat + +Based on the official AutoGen example here: https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb + +Begin by doing: + pip install "pyautogen[teachable]" + pip install pymemgpt + or + pip install -e . (inside the MemGPT home directory) +""" + +import os +import autogen +from memgpt.autogen.memgpt_agent import create_memgpt_autogen_agent_from_config + +config_list = [ + { + "model": "gpt-4", + "api_key": os.getenv("OPENAI_API_KEY"), + }, +] + +# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo + +# (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb) +# If USE_MEMGPT is True, then we swap out the "coder" agent with a MemGPT agent +USE_MEMGPT = True + +llm_config = {"config_list": config_list, "seed": 42} + +# The user agent +user_proxy = autogen.UserProxyAgent( + name="User_proxy", + system_message="A human admin.", + code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"}, + human_input_mode="TERMINATE", # needed? + default_auto_reply="You are going to figure it all out on your own. " + "Work by yourself, the user won't reply until you output `TERMINATE` to end the conversation.", +) + +if not USE_MEMGPT: + # In the AutoGen example, we create an AssistantAgent to play the role of the coder + coder = autogen.AssistantAgent( + name="Coder", + llm_config=llm_config, + system_message=f"I am a 10x engineer, trained in Python. 
I was the first engineer at Uber " + f"(which I make sure to tell everyone I work with).", + human_input_mode="TERMINATE", + ) + +else: + # In our example, we swap this AutoGen agent with a MemGPT agent + # This MemGPT agent will have all the benefits of MemGPT, ie persistent memory, etc. + coder = create_memgpt_autogen_agent_from_config( + "MemGPT_coder", + llm_config=llm_config, + system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber " + f"(which I make sure to tell everyone I work with).", + human_input_mode="TERMINATE", + ) + +# Begin the group chat with a message from the user +user_proxy.initiate_chat( + coder, + message="I want to design an app to make me one million dollars in one month. " + "Tell me all the details, then try out every step.", +) diff --git a/memgpt/autogen/examples/agent_groupchat.py b/memgpt/autogen/examples/agent_groupchat.py index e4d8224e..d6339baa 100644 --- a/memgpt/autogen/examples/agent_groupchat.py +++ b/memgpt/autogen/examples/agent_groupchat.py @@ -11,7 +11,7 @@ Begin by doing: import os import autogen -from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent +from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent, create_memgpt_autogen_agent_from_config config_list = [ { @@ -20,10 +20,13 @@ config_list = [ }, ] -# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb) +# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo +# (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb) # If USE_MEMGPT is True, then we swap out the "coder" agent with a MemGPT agent USE_MEMGPT = True +USE_AUTOGEN_WORKFLOW = False + llm_config = {"config_list": config_list, "seed": 42} # The user agent @@ -51,13 +54,25 @@ if not USE_MEMGPT: else: # In our example, we swap this AutoGen agent with a MemGPT agent # 
This MemGPT agent will have all the benefits of MemGPT, ie persistent memory, etc. - coder = create_autogen_memgpt_agent( - "MemGPT_coder", - persona_description="I am a 10x engineer, trained in Python. I was the first engineer at Uber (which I make sure to tell everyone I work with).", - user_description=f"You are participating in a group chat with a user ({user_proxy.name}) and a product manager ({pm.name}).", - # extra options - # interface_kwargs={"debug": True}, - ) + if not USE_AUTOGEN_WORKFLOW: + coder = create_autogen_memgpt_agent( + "MemGPT_coder", + persona_description="I am a 10x engineer, trained in Python. I was the first engineer at Uber " + "(which I make sure to tell everyone I work with).", + user_description=f"You are participating in a group chat with a user ({user_proxy.name}) " + f"and a product manager ({pm.name}).", + # extra options + # interface_kwargs={"debug": True}, + ) + else: + coder = create_memgpt_autogen_agent_from_config( + "MemGPT_coder", + llm_config=llm_config, + system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber " + f"(which I make sure to tell everyone I work with).\n" + f"You are participating in a group chat with a user ({user_proxy.name}) " + f"and a product manager ({pm.name}).", + ) # Initialize the group chat between the user and two LLM agents (PM and coder) groupchat = autogen.GroupChat(agents=[user_proxy, pm, coder], messages=[], max_round=12) @@ -66,5 +81,6 @@ manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config) # Begin the group chat with a message from the user user_proxy.initiate_chat( manager, - message="I want to design an app to make me one million dollars in one month. Yes, your heard that right.", + message="I want to design an app to make me one million dollars in one month. 
" + "Yes, your heard that right.", ) diff --git a/memgpt/autogen/memgpt_agent.py b/memgpt/autogen/memgpt_agent.py index 66a14d5e..ac9a2cdd 100644 --- a/memgpt/autogen/memgpt_agent.py +++ b/memgpt/autogen/memgpt_agent.py @@ -1,4 +1,4 @@ -from autogen.agentchat import ConversableAgent, Agent +from autogen.agentchat import Agent, ConversableAgent, UserProxyAgent, GroupChat, GroupChatManager from ..agent import AgentAsync import asyncio @@ -18,16 +18,70 @@ def create_memgpt_autogen_agent_from_config( system_message: Optional[str] = "You are a helpful AI Assistant.", is_termination_msg: Optional[Callable[[Dict], bool]] = None, max_consecutive_auto_reply: Optional[int] = None, - human_input_mode: Optional[str] = "TERMINATE", + human_input_mode: Optional[str] = "ALWAYS", function_map: Optional[Dict[str, Callable]] = None, code_execution_config: Optional[Union[Dict, bool]] = None, llm_config: Optional[Union[Dict, bool]] = None, default_auto_reply: Optional[Union[str, Dict, None]] = "", ): - """ - TODO support AutoGen config workflow in a clean way with constructors - """ - raise NotImplementedError + """Construct AutoGen config workflow in a clean way.""" + + model = constants.DEFAULT_MEMGPT_MODEL if llm_config is None else llm_config["config_list"][0]["model"] + persona_desc = personas.DEFAULT if system_message == "" else system_message + if human_input_mode == "ALWAYS": + user_desc = humans.DEFAULT + elif human_input_mode == "TERMINATE": + user_desc = "Work by yourself, the user won't reply until you output `TERMINATE` to end the conversation." + else: + user_desc = "Work by yourself, the user won't reply. Elaborate as much as possible." 
+ + if function_map is not None or code_execution_config is not None: + raise NotImplementedError + + autogen_memgpt_agent = create_autogen_memgpt_agent( + name, + preset=presets.DEFAULT, + model=model, + persona_description=persona_desc, + user_description=user_desc, + is_termination_msg=is_termination_msg, + ) + + if human_input_mode != "ALWAYS": + coop_agent1 = create_autogen_memgpt_agent( + name, + preset=presets.DEFAULT, + model=model, + persona_description=persona_desc, + user_description=user_desc, + is_termination_msg=is_termination_msg, + ) + if default_auto_reply != "": + coop_agent2 = UserProxyAgent( + name, + human_input_mode="NEVER", + default_auto_reply=default_auto_reply, + ) + else: + coop_agent2 = create_autogen_memgpt_agent( + name, + preset=presets.DEFAULT, + model=model, + persona_description=persona_desc, + user_description=user_desc, + is_termination_msg=is_termination_msg, + ) + + groupchat = GroupChat( + agents=[autogen_memgpt_agent, coop_agent1, coop_agent2], + messages=[], + max_round=12 if max_consecutive_auto_reply is None else max_consecutive_auto_reply + ) + manager = GroupChatManager(name=name, groupchat=groupchat, llm_config=llm_config) + return manager + + else: + return autogen_memgpt_agent def create_autogen_memgpt_agent( @@ -40,6 +94,7 @@ def create_autogen_memgpt_agent( interface_kwargs={}, persistence_manager=None, persistence_manager_kwargs={}, + is_termination_msg: Optional[Callable[[Dict], bool]] = None, ): """ See AutoGenInterface.__init__ for available options you can pass into @@ -69,6 +124,7 @@ def create_autogen_memgpt_agent( autogen_memgpt_agent = MemGPTAgent( name=autogen_name, agent=memgpt_agent, + is_termination_msg=is_termination_msg, ) return autogen_memgpt_agent @@ -80,6 +136,7 @@ class MemGPTAgent(ConversableAgent): agent: AgentAsync, skip_verify=False, concat_other_agent_messages=False, + is_termination_msg: Optional[Callable[[Dict], bool]] = None, ): super().__init__(name) self.agent = agent @@ -89,6 +146,10 
@@ class MemGPTAgent(ConversableAgent): self.register_reply([Agent, None], MemGPTAgent._generate_reply_for_user_message) self.messages_processed_up_to_idx = 0 + self._is_termination_msg = ( + is_termination_msg if is_termination_msg is not None else (lambda x: x == "TERMINATE") + ) + def format_other_agent_message(self, msg): if "name" in msg: user_message = f"{msg['name']}: {msg['content']}" @@ -158,6 +219,10 @@ class MemGPTAgent(ConversableAgent): else: break + # Stop the conversation + if self._is_termination_msg(new_messages[-1]['content']): + return True, None + # Pass back to AutoGen the pretty-printed calls MemGPT made to the interface pretty_ret = MemGPTAgent.pretty_concat(self.agent.interface.message_list) self.messages_processed_up_to_idx += len(new_messages)