* First commit of memgpt client and some messy test code * rolled back unnecessary changes to abstract interface; switched client to always use Queueing Interface * Added missing interface clear() in run_command; added convenience method for checking if an agent exists, used that in create_agent * Formatting fixes * Fixed incorrect naming of get_agent_memory in rest server * Removed erroneous clear from client save method; Replaced print statements with appropriate logger calls in server * Updated readme with client usage instructions * added tests for Client * make printing to terminal togglable on queininginterface (should probably refactor this to a logger) * turn off printing to stdout via interface by default * allow importing the python client in a similar fashion to openai-python (see https://github.com/openai/openai-python) * Allowed quickstart on init of client; updated readme and test_client accordingly * oops, fixed name of openai_api_key config key * Fixed small typo * Fixed broken test by adding memgpt hosted model details to agent config * silence llamaindex 'LLM is explicitly disabled. Using MockLLM.' on server * default to openai if user's memgpt directory is empty (first time) * correct type hint * updated section on client in readme * added comment about how MemGPT config != Agent config * patch unrelated test * update wording on readme * patch another unrelated test * added python client to readme docs * Changed 'user' to 'human' in example; Defaulted AgentConfig.model to 'None'; Fixed issue in create_agent (accounting for dict config); matched test code to example * Fixed advanced example * patch test * patch --------- Co-authored-by: cpacker <packercharles@gmail.com>
74 lines
2.0 KiB
Python
74 lines
2.0 KiB
Python
import os
|
|
import pexpect
|
|
|
|
from memgpt.config import MemGPTConfig
|
|
|
|
from .constants import TIMEOUT
|
|
|
|
|
|
def wipe_config():
    """Delete the on-disk MemGPT config file, if one exists, so tests start clean.

    The ``MEMGPT_CONFIG_PATH`` environment variable (when set to a non-empty
    value) overrides the library's default ``MemGPTConfig.config_path``.
    """
    if MemGPTConfig.exists():
        # Env var takes precedence; `or` preserves the original truthiness
        # check (an empty MEMGPT_CONFIG_PATH falls back to the default).
        config_path = os.getenv("MEMGPT_CONFIG_PATH") or MemGPTConfig.config_path
        os.remove(config_path)
|
|
|
|
|
|
def configure_memgpt_localllm():
    """Drive ``memgpt configure`` via pexpect to set up a local-LLM config.

    Wipes any existing config first so every prompt below appears in the
    exact order scripted here; accepts the default choice for most prompts.
    Asserts the CLI exits cleanly at the end.
    """
    wipe_config()
    child = pexpect.spawn("memgpt configure")

    child.expect("Select LLM inference provider", timeout=TIMEOUT)
    # Move two entries down the provider menu (off the default), then confirm.
    child.send("\x1b[B")  # Send the down arrow key
    child.send("\x1b[B")  # Send the down arrow key
    child.sendline()

    child.expect("Select LLM backend", timeout=TIMEOUT)
    child.sendline()  # accept default backend

    child.expect("Enter default endpoint", timeout=TIMEOUT)
    child.sendline()  # accept default endpoint

    child.expect("Select default model wrapper", timeout=TIMEOUT)
    child.sendline()  # accept default wrapper

    child.expect("Select your model's context window", timeout=TIMEOUT)
    child.sendline()  # accept default context window

    child.expect("Select embedding provider", timeout=TIMEOUT)
    # Move three entries down the embedding-provider menu, then confirm.
    child.send("\x1b[B")  # Send the down arrow key
    child.send("\x1b[B")  # Send the down arrow key
    child.send("\x1b[B")  # Send the down arrow key
    child.sendline()

    child.expect("Select default preset", timeout=TIMEOUT)
    child.sendline()  # accept default preset

    child.expect("Select default persona", timeout=TIMEOUT)
    child.sendline()  # accept default persona

    child.expect("Select default human", timeout=TIMEOUT)
    child.sendline()  # accept default human

    child.expect("Select storage backend for archival data", timeout=TIMEOUT)
    child.sendline()  # accept default storage backend

    # NOTE(review): extra Enter with no preceding expect() — presumably
    # acknowledges one more prompt after storage selection; confirm against
    # the `memgpt configure` prompt sequence.
    child.sendline()

    child.expect(pexpect.EOF, timeout=TIMEOUT)  # Wait for child to exit
    child.close()
    assert child.isalive() is False, "CLI should have terminated."
    assert child.exitstatus == 0, "CLI did not exit cleanly."
|
|
|
|
|
|
def configure_memgpt(enable_openai=False, enable_azure=False):
    """Configure MemGPT for tests.

    Only the local-LLM path is scripted today; requesting the OpenAI or
    Azure providers raises ``NotImplementedError``.
    """
    if enable_openai:
        raise NotImplementedError
    if enable_azure:
        raise NotImplementedError
    configure_memgpt_localllm()
|