"""Queueing interface for the MemGPT Python client.

Buffers agent output messages in a thread-safe queue so that a server or
client can flush them manually or stream them asynchronously (see
``QueuingInterface`` below).
"""
import asyncio
import queue

from memgpt.interface import AgentInterface
class QueuingInterface(AgentInterface):
    """Agent interface that queues messages inside an internal buffer.

    Messages emitted by the agent (internal monologue, assistant replies,
    function call/return events) are stored as dicts in a thread-safe
    ``queue.Queue`` and must be flushed manually, either synchronously via
    ``to_list`` or asynchronously via ``message_generator``.
    """

    def __init__(self, debug: bool = True):
        """
        Args:
            debug: When True, every queued message is also printed to stdout.
        """
        # queue.Queue is thread-safe, so the producer (agent step loop) and
        # consumer (server/client) may run on different threads.
        self.buffer = queue.Queue()
        self.debug = debug

    def to_list(self) -> list:
        """Drain the queue into a list (empties the buffer as a side effect).

        A trailing "STOP" sentinel (enqueued by ``step_yield``) is dropped —
        but only when it is not the sole item in the buffer.
        """
        items = []
        while not self.buffer.empty():
            try:
                items.append(self.buffer.get_nowait())
            except queue.Empty:
                # Another consumer raced us between empty() and get_nowait().
                break
        # NOTE: a lone "STOP" (len == 1) is intentionally left in place.
        if len(items) > 1 and items[-1] == "STOP":
            items.pop()
        return items

    def clear(self) -> None:
        """Clear all messages from the queue."""
        # Clear the underlying deque while holding the queue's own lock so
        # concurrent put()/get() calls observe a consistent state.
        with self.buffer.mutex:
            self.buffer.queue.clear()

    async def message_generator(self):
        """Asynchronously yield queued messages until a "STOP" sentinel.

        Polls the buffer with a non-blocking get; sleeps briefly when the
        buffer is empty to avoid a busy loop without blocking the event loop.
        """
        while True:
            try:
                # Non-blocking get: a blocking get() after an empty() check is
                # racy — if another consumer drains the queue in between, a
                # blocking get() would stall the event loop indefinitely.
                message = self.buffer.get_nowait()
            except queue.Empty:
                await asyncio.sleep(0.1)  # small sleep to prevent a busy loop
                continue
            if message == "STOP":
                break
            yield message

    def step_yield(self) -> None:
        """Enqueue the special "STOP" sentinel marking the end of a step."""
        self.buffer.put("STOP")

    def user_message(self, msg: str) -> None:
        """Handle reception of a user message (intentionally a no-op;
        nothing is queued)."""
        pass

    def internal_monologue(self, msg: str) -> None:
        """Queue the agent's internal monologue."""
        if self.debug:
            print(msg)
        self.buffer.put({"internal_monologue": msg})

    def assistant_message(self, msg: str) -> None:
        """Queue a message the agent sends to the user."""
        if self.debug:
            print(msg)
        self.buffer.put({"assistant_message": msg})

    def function_message(self, msg: str) -> None:
        """Queue a function-call event, categorized by its message prefix.

        The status prefix ("Running ", "Success: ", "Error: ") is stripped
        from the queued payload. Stripping is done by slicing off the leading
        prefix only — ``str.replace`` would incorrectly remove *every*
        occurrence of the prefix inside the message.
        """
        if self.debug:
            print(msg)

        if msg.startswith("Running "):
            self.buffer.put({"function_call": msg[len("Running "):]})

        elif msg.startswith("Success: "):
            self.buffer.put({"function_return": msg[len("Success: "):], "status": "success"})

        elif msg.startswith("Error: "):
            self.buffer.put({"function_return": msg[len("Error: "):], "status": "error"})

        else:
            # NOTE: generic catch-all; should not normally happen
            self.buffer.put({"function_message": msg})