diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 05ba22bb..b198af05 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -68,7 +68,7 @@ jobs:
MEMGPT_SERVER_PASS: test_server_token
PYTHONPATH: ${{ github.workspace }}:${{ env.PYTHONPATH }}
run: |
- poetry run pytest -s -vv -k "not test_quickstart and not test_endpoints and not test_storage and not test_server and not test_openai_client" tests
+ poetry run pytest -s -vv -k "not test_concurrent_connections.py and not test_quickstart and not test_endpoints and not test_storage and not test_server and not test_openai_client" tests
- name: Run storage tests
env:
diff --git a/Dockerfile b/Dockerfile
index a4249f5b..ca7ea8b9 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -16,7 +16,7 @@ RUN poetry lock --no-update
RUN if [ "$MEMGPT_ENVIRONMENT" = "DEVELOPMENT" ] ; then \
poetry install --no-root -E "postgres server dev autogen" ; \
else \
- poetry install --without dev --no-root -E "postgres server" && \
+ poetry install --without dev --without local --no-root -E "postgres server" && \
rm -rf $POETRY_CACHE_DIR ; \
fi
diff --git a/dev-compose.yaml b/dev-compose.yaml
index 944b6b14..32b66817 100644
--- a/dev-compose.yaml
+++ b/dev-compose.yaml
@@ -35,6 +35,7 @@ services:
- MEMGPT_PG_HOST=pgvector_db
- MEMGPT_PG_PORT=5432
- OPENAI_API_KEY=${OPENAI_API_KEY}
+ - SERPAPI_API_KEY=${SERPAPI_API_KEY}
volumes:
- ./configs/server_config.yaml:/root/.memgpt/config # config file
# ~/.memgpt/credentials:/root/.memgpt/credentials # credentials file
diff --git a/examples/tutorials/dev_portal_agent_chat.png b/examples/tutorials/dev_portal_agent_chat.png
new file mode 100644
index 00000000..89042f70
Binary files /dev/null and b/examples/tutorials/dev_portal_agent_chat.png differ
diff --git a/examples/tutorials/dev_portal_memory.png b/examples/tutorials/dev_portal_memory.png
new file mode 100644
index 00000000..c1717436
Binary files /dev/null and b/examples/tutorials/dev_portal_memory.png differ
diff --git a/examples/tutorials/dev_portal_tools.png b/examples/tutorials/dev_portal_tools.png
new file mode 100644
index 00000000..57b85498
Binary files /dev/null and b/examples/tutorials/dev_portal_tools.png differ
diff --git a/examples/tutorials/developer_portal_login.png b/examples/tutorials/developer_portal_login.png
new file mode 100644
index 00000000..6234496b
Binary files /dev/null and b/examples/tutorials/developer_portal_login.png differ
diff --git a/examples/tutorials/local-python-client.ipynb b/examples/tutorials/local-python-client.ipynb
new file mode 100644
index 00000000..b4db0d68
--- /dev/null
+++ b/examples/tutorials/local-python-client.ipynb
@@ -0,0 +1,239 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "c015b59e-1187-4d45-b2af-7b4c5a9512e1",
+ "metadata": {},
+ "source": [
+ "# MemGPT Python Client \n",
+ "Welcome to the MemGPT tutorial! In this tutorial, we'll go through how to create a basic user-client for MemGPT and create a custom agent with long term memory. \n",
+ "\n",
+ "MemGPT runs *agents-as-a-service*, so agents can run independently on a server. For this tutorial, we will run a local version of the client which does not require a server, but still allows you to see some of MemGPT's capabilities. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a34fe313-f63e-4f36-9142-f681431bbb91",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!pip install git+https://github.com/cpacker/MemGPT.git@tutorials"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "191c1cf1-03e6-411a-8409-003caa8530f5",
+ "metadata": {},
+ "source": [
+ "### Setup your OpenAI API key "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "23091690-bc50-4fbc-b48d-50b639453e36",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os \n",
+ "\n",
+ "os.environ[\"OPENAI_API_KEY\"] = \"sk-...\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f20ad6c7-9066-45e0-88ac-40920c83cc39",
+ "metadata": {},
+ "source": [
+ "## Part 1: Connecting to the MemGPT Client \n",
+ "\n",
+ "We create a local client which creates a quickstart configuration for OpenAI using the provided `OPENAI_API_KEY`. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9b0871a0-42af-4573-a8ba-efb4fe7e5e5a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from memgpt.client.client import LocalClient\n",
+ "\n",
+ "client = LocalClient(quickstart_option=\"openai\") "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "40666896-0fa2-465e-b51b-57719de30542",
+ "metadata": {},
+ "source": [
+ "## Part 2: Create an agent \n",
+ "We'll first start with creating a basic MemGPT agent. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fb90f12b-acd7-4877-81e8-0e7b9eb4bd9b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "basic_agent = client.create_agent(\n",
+ " name=\"basic_agent\", \n",
+ ")\n",
+ "print(f\"Created agent: {basic_agent.name}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "94d14102-3ef8-40fe-b32e-c77d0b8df311",
+ "metadata": {},
+ "source": [
+ "We can now send messages from the user to the agent by specifying the `agent_id`: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3cbfef36-76f0-4f0b-990a-5d8409a676d7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from memgpt.client.utils import pprint \n",
+ "\n",
+ "response = client.user_message(agent_id=basic_agent.id, message=\"hello\") \n",
+ "pprint(response.messages)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b24d048e-f3cc-4830-aaa2-5e590d652bd9",
+ "metadata": {},
+ "source": [
+ "### Adding Personalization\n",
+ "We can now create a more customized agent by specifying a custom `human` and `persona` field. \n",
+ "* The *human* specifies the personalization information about the user interacting with the agent \n",
+ "* The *persona* specifies the behavior and personality of the agent\n",
+ "\n",
+ "What makes MemGPT unique is that the starting *persona* and *human* can change over time as the agent gains new information, enabling it to have evolving memory. We'll see an example of this later in the tutorial."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3ec35979-9102-4ea7-926e-ea7ccd501ceb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: feel free to change the human and person to what you'd like \n",
+ "persona = \\\n",
+ "\"\"\"\n",
+ "You are a friendly and helpful agent!\n",
+ "\"\"\"\n",
+ "\n",
+ "human = \\\n",
+ "\"\"\"\n",
+ "I am an Accenture consultant with many specializations. My name is Sarah.\n",
+ "\"\"\"\n",
+ "\n",
+ "custom_agent = client.create_agent(\n",
+ " name=\"custom_agent\", \n",
+ " human=human, \n",
+ " persona=persona\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "63a9a61b-58c9-4d09-a4f7-48233c72c340",
+ "metadata": {},
+ "source": [
+ "### Viewing memory \n",
+ "You can access the agent's memories through the client. There are two types of memory, *core* and *archival* memory: \n",
+ "1. Core memory stores short-term memories in the LLM's context \n",
+ "2. Archival memory stores long term memories in a vector database\n",
+ "\n",
+ "Core memory is divided into a \"human\" and \"persona\" section. You can see the agent's memories about the human below: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b0d1840a-05ee-47c1-b5f5-89faafd96e7c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(client.get_agent_memory(agent_id=custom_agent.id)[\"core_memory\"][\"human\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "95c8a058-5d67-45b7-814b-38bb67c9acf3",
+ "metadata": {},
+ "source": [
+ "### Evolving memory \n",
+ "MemGPT agents have long term memory, and can evolve what they store in their memory over time. In the example below, we make a correction to the previously provided information. See how the agent processes this new information. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7e58e685-579e-4a0d-bba7-41976ea7f469",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "response = client.user_message(agent_id=custom_agent.id, message=\"Actually, my name is Charles\") \n",
+ "pprint(response.messages)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "af2a2dd6-925e-49b2-ab01-bf837f33b26c",
+ "metadata": {},
+ "source": [
+ "Now lets see what the agent's memory looks like again: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "41ef4aaa-4a48-44bb-8944-855f30725d6d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(client.get_agent_memory(agent_id=custom_agent.id)[\"core_memory\"][\"human\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "66da949b-1084-4b87-b77c-6cbd4a822b34",
+ "metadata": {},
+ "source": [
+ "## 🎉 Congrats, you're done with day 1 of MemGPT! \n",
+ "For day 2, we'll go over how to connect *data sources* to MemGPT to run RAG agents. "
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "memgpt",
+ "language": "python",
+ "name": "memgpt"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/tutorials/memgpt-admin-client.ipynb b/examples/tutorials/memgpt-admin-client.ipynb
new file mode 100644
index 00000000..e627094d
--- /dev/null
+++ b/examples/tutorials/memgpt-admin-client.ipynb
@@ -0,0 +1,50 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fb13c7bc-fbb4-4ccd-897c-08995db258e8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from memgpt import Admin \n",
+ "\n",
+ "base_url=\"memgpt.localhost\"\n",
+ "token=\"memgptadmin\" \n",
+ "\n",
+ "admin_client = Admin(base_url=base_url, token=\"memgptadmin\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "984b8249-a3f7-40d1-9691-4d128f9a90ff",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "user = admin_client.create_user()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "memgpt",
+ "language": "python",
+ "name": "memgpt"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/tutorials/memgpt_paper.pdf b/examples/tutorials/memgpt_paper.pdf
new file mode 100644
index 00000000..d2c8bd78
Binary files /dev/null and b/examples/tutorials/memgpt_paper.pdf differ
diff --git a/examples/tutorials/memgpt_rag_agent.ipynb b/examples/tutorials/memgpt_rag_agent.ipynb
new file mode 100644
index 00000000..6dfc0c3e
--- /dev/null
+++ b/examples/tutorials/memgpt_rag_agent.ipynb
@@ -0,0 +1,125 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "64fa991c-98e5-4be0-a838-06a4617d8be3",
+ "metadata": {},
+ "source": [
+ "## Part 4: Adding external data \n",
+ "In addition to short term, in-context memories, MemGPT agents also have a long term memory store called *archival memory*. We can enable agents to leverage external data (e.g. PDF files, database records, etc.) by inserting data into archival memory. In this example, we'll show how to load the MemGPT paper as a *source*, which defines a set of data that can be attached to agents. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c61ac9c3-cbea-47a5-a6a4-4133ffe5984e",
+ "metadata": {},
+ "source": [
+ "We first download a PDF file, the MemGPT paper: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f89e9156-3d2d-4ce6-b5e9-aeb4cdfd5657",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import requests\n",
+ "\n",
+ "url = \"https://arxiv.org/pdf/2310.08560\"\n",
+ "response = requests.get(url)\n",
+ "filename = \"memgpt_paper.pdf\"\n",
+ "\n",
+ "with open(filename, 'wb') as f:\n",
+ " f.write(response.content)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bcfe3a48-cdb0-4843-9599-623753eb61b9",
+ "metadata": {},
+ "source": [
+ "Next, we create a MemGPT source to load data into: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7ccf21fb-5862-42c2-96ca-63e0ba2f48b5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "memgpt_paper = client.create_source(\n",
+ " name=\"memgpt_paper\", \n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f114bf0b-6a25-4dbf-9c2c-59271d46ebba",
+ "metadata": {},
+ "source": [
+ "Now that we have a source, we can load files into the source. Loading the file will take a bit of time, since the file needs to be parsed and stored as *embeddings* using an embedding model. The loading function returns a *job* which can be pinged for a status. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6fe624eb-bf08-4267-a849-06103c1ad5b6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "job = client.load_file_into_source(filename=filename, source_id=memgpt_paper.id)\n",
+ "job"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "27ce13f5-d878-406d-9a5f-7e2335f2ef0d",
+ "metadata": {},
+ "source": [
+ "### Attaching data to an agent \n",
+ "To allow an agent to access data in a source, we need to *attach* it to the agent. This will load the source's data into the agent's archival memory. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5be91571-87ee-411a-8e79-25c56c414360",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "client.attach_source_to_agent(source_id=memgpt_paper.id, agent_id=basic_agent.id)\n",
+ "# TODO: add system message saying that file has been attached \n",
+ "\n",
+ "from pprint import pprint\n",
+ "\n",
+ "# TODO: do soemthing accenture related \n",
+ "# TODO: brag about query rewriting -- hyde paper \n",
+ "response = client.user_message(agent_id=basic_agent.id, message=\"what is core memory? search your archival memory.\") \n",
+ "pprint(response.messages)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "memgpt",
+ "language": "python",
+ "name": "memgpt"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/tutorials/python-client.ipynb b/examples/tutorials/python-client.ipynb
new file mode 100644
index 00000000..b66cc702
--- /dev/null
+++ b/examples/tutorials/python-client.ipynb
@@ -0,0 +1,319 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "6d3806ac-38f3-4999-bbed-953037bd0fd9",
+ "metadata": {},
+ "source": [
+ "# MemGPT Python Client \n",
+ "Welcome to the MemGPT tutorial! In this tutorial, we'll go through how to create a basic user-client for MemGPT and create a custom agent with long term memory. \n",
+ "\n",
+ "MemGPT runs *agents-as-a-service*, so agents can run independently on a server. For this tutorial, we will be connecting to an existing MemGPT server via the Python client and the UI console. If you don't have a running server, see the [documentation](https://memgpt.readme.io/docs/running-a-memgpt-server) for instructions on how to create one. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7c0b6d6b-dbe6-412b-b129-6d7eb7d626a3",
+ "metadata": {},
+ "source": [
+ "## Part 0: Install MemGPT "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "481d0976-d26b-46d2-ba74-8f2bb5556387",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!pip install git+https://github.com/cpacker/MemGPT.git@tutorials"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a0484348-f7b2-48e3-9a2f-7d6495ef76e3",
+ "metadata": {},
+ "source": [
+ "## Part 1: Connecting to the MemGPT Client \n",
+ "\n",
+ "The MemGPT client connects to a running MemGPT service, specified by `base_url`. The client corresponds to a *single-user* (you), so requires an authentication token to let the service know who you are. \n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "53ae2e1b-ad22-43c2-b3d8-92d591be8840",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from memgpt import create_client\n",
+ "\n",
+ "base_url = \"http://35.238.125.250:8083\"\n",
+ "\n",
+ "# TODO: replace with your token \n",
+ "my_token = \"sk-...\" \n",
+ "\n",
+ "client = create_client(base_url=base_url, token=my_token) "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3c5c8651-e8aa-4423-b2b8-284bf6a01577",
+ "metadata": {},
+ "source": [
+ "### Viewing the developer portal \n",
+ "MemGPT provides a portal interface for viewing and interacting with agents, data sources, tools, and more. You can enter `http://35.238.125.250:8083` into your browser to load the developer portal, and enter in `my_token` to log in. \n",
+ "\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "66e47b34-5feb-4660-85f0-14b5ee7f62b9",
+ "metadata": {},
+ "source": [
+ "## Part 2: Create an agent \n",
+ "We'll first start with creating a basic MemGPT agent. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "24745606-b0fb-4157-a5cd-82fd0c26711f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "basic_agent = client.create_agent(\n",
+ " name=\"basic_agent\", \n",
+ ")\n",
+ "print(f\"Created agent: {basic_agent.name}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fcfb0d7b-b260-4bc0-8db2-c65f40e4afd5",
+ "metadata": {},
+ "source": [
+ "We can now send messages from the user to the agent by specifying the `agent_id`: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a37bc9aa-4efb-4b4d-a6ce-f02505cb3240",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from memgpt.client.utils import pprint \n",
+ "\n",
+ "response = client.user_message(agent_id=basic_agent.id, message=\"hello\") \n",
+ "pprint(response.messages)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9803140c-2b9d-426b-8812-9295806eb312",
+ "metadata": {},
+ "source": [
+ "### Chatting in the developer portal \n",
+ "You can also chat with the agent inside of the developer portal. Try clicking the chat button in the agent view. \n",
+ "\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "99ae20ec-e92e-4480-a652-b4aea28a6199",
+ "metadata": {},
+ "source": [
+ "### Adding Personalization\n",
+ "We can now create a more customized agent by specifying a custom `human` and `persona` field. \n",
+ "* The *human* specifies the personalization information about the user interacting with the agent \n",
+ "* The *persona* specifies the behavior and personality of the agent\n",
+ "\n",
+ "What makes MemGPT unique is that the starting *persona* and *human* can change over time as the agent gains new information, enabling it to have evolving memory. We'll see an example of this later in the tutorial."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c0876410-4d70-490d-a798-39938b5ce941",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: feel free to change the human and person to what you'd like \n",
+ "persona = \\\n",
+ "\"\"\"\n",
+ "You are a friendly and helpful agent!\n",
+ "\"\"\"\n",
+ "\n",
+ "human = \\\n",
+ "\"\"\"\n",
+ "I am an Accenture consultant with many specializations. My name is Sarah.\n",
+ "\"\"\"\n",
+ "\n",
+ "custom_agent = client.create_agent(\n",
+ " name=\"custom_agent\", \n",
+ " human=human, \n",
+ " persona=persona\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "21293857-80e4-46e4-b628-3912fad038e9",
+ "metadata": {},
+ "source": [
+ "### Viewing memory \n",
+ "You can view and edit the agent's memory inside of the developer console. There are two types of memory, *core* and *archival* memory: \n",
+ "1. Core memory stores short-term memories in the LLM's context \n",
+ "2. Archival memory stores long term memories in a vector database\n",
+ "\n",
+ "In this example, we'll look at how the agent can modify its core memory with new information. To see the agent's memory, click the \"Core Memory\" section on the developer console. \n",
+ "\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d8fa13eb-ce4b-4e4f-81b6-9d6ef6fa67c2",
+ "metadata": {},
+ "source": [
+ "### Referencing memory \n",
+ "MemGPT agents can customize their responses based on what memories they have stored. Try asking a question that related to the human and persona you provided. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fddbefe5-3b94-4a08-aa50-d80fb581c747",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "response = client.user_message(agent_id=custom_agent.id, message=\"what do I work as?\") \n",
+ "pprint(response.messages)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "30497119-e208-4a4e-b482-e7cfff346263",
+ "metadata": {},
+ "source": [
+ "### Evolving memory \n",
+ "MemGPT agents have long term memory, and can evolve what they store in their memory over time. In the example below, we make a correction to the previously provided information. See how the agent processes this new information. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "679fa708-20ee-4e75-9222-b476f126bc6f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "response = client.user_message(agent_id=custom_agent.id, message=\"Actually, my name is Charles\") \n",
+ "pprint(response.messages)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "686ac5a3-be63-4afd-97ae-b7d05219dd60",
+ "metadata": {},
+ "source": [
+ "Now, look back at the developer portal and at the agent's *core memory*. Do you see a change in the *human* section of the memory? "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "878d2f49-a5a6-4483-9f69-7436bcf00cfb",
+ "metadata": {},
+ "source": [
+ "## Part 3: Adding Tools \n",
+ "MemGPT agents can be connected to custom tools. Currently, tools must be created by service administrators. However, you can add additional tools provided by the service administrator to the agent you create. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "35785d36-2674-4a00-937b-4c747e0fb6bf",
+ "metadata": {},
+ "source": [
+ "### View Available Tools "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c307a6f7-276b-49f5-8d3d-48aaaea221a7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tools = client.list_tools().tools\n",
+ "for tool in tools: \n",
+ " print(f\"Tool: {tool.name} - {tool.json_schema['description']}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "318d19dc-b9dd-448c-ab5c-9c9311d21fad",
+ "metadata": {},
+ "source": [
+ "### Create a tool using agent in the developer portal \n",
+ "Create an agent in the developer portal and toggle additional tools you want the agent to use. We recommend modifying the *persona* to notify the agent that it should be using the tools for certain tasks. \n",
+ "\n",
+ "\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "aecdaa70-861a-43d5-b006-fecd90a8ed19",
+ "metadata": {},
+ "source": [
+ "## Part 4: Cleanup (optional) \n",
+ "You can clean up the agents you created by using the following command to delete your agents: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1320d9c9-170b-48a8-b5e8-70737b1a8aac",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for agent in client.list_agents().agents: \n",
+ " client.delete_agent(agent[\"id\"])\n",
+ " print(f\"Deleted agent {agent['name']} with ID {agent['id']}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "510675a8-22bc-4f9f-9c79-91e2ffa9caf9",
+ "metadata": {},
+ "source": [
+ "## 🎉 Congrats, you're done with day 1 of MemGPT! \n",
+ "For day 2, we'll go over how to connect *data sources* to MemGPT to run RAG agents. "
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "memgpt",
+ "language": "python",
+ "name": "memgpt"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/memgpt/agent.py b/memgpt/agent.py
index 22aeeb40..57aa22f2 100644
--- a/memgpt/agent.py
+++ b/memgpt/agent.py
@@ -86,9 +86,14 @@ def link_functions(function_schemas: list):
raise ValueError(f"While loading agent.state.functions encountered a bad function schema object with no name:\n{f_schema}")
linked_function = available_functions.get(f_name)
if linked_function is None:
- raise ValueError(
+ # raise ValueError(
+ # f"Function '{f_name}' was specified in agent.state.functions, but is not in function library:\n{available_functions.keys()}"
+ # )
+ print(
f"Function '{f_name}' was specified in agent.state.functions, but is not in function library:\n{available_functions.keys()}"
)
+ continue
+
# Once we find a matching function, make sure the schema is identical
if json.dumps(f_schema, ensure_ascii=JSON_ENSURE_ASCII) != json.dumps(
linked_function["json_schema"], ensure_ascii=JSON_ENSURE_ASCII
diff --git a/memgpt/client/admin.py b/memgpt/client/admin.py
index 94469952..f5227fb5 100644
--- a/memgpt/client/admin.py
+++ b/memgpt/client/admin.py
@@ -63,7 +63,6 @@ class Admin:
if response.status_code != 200:
raise HTTPError(response.json())
response_json = response.json()
- print(response_json)
return CreateUserResponse(**response_json)
def delete_user(self, user_id: uuid.UUID):
@@ -89,23 +88,23 @@ class Admin:
"""Add a tool implemented in a file path"""
source_code = open(file_path, "r", encoding="utf-8").read()
data = {"name": name, "source_code": source_code, "source_type": source_type, "tags": tags}
- response = requests.post(f"{self.base_url}/api/tools", json=data, headers=self.headers)
+ response = requests.post(f"{self.base_url}/admin/tools", json=data, headers=self.headers)
if response.status_code != 200:
raise ValueError(f"Failed to create tool: {response.text}")
return ToolModel(**response.json())
def list_tools(self) -> ListToolsResponse:
- response = requests.get(f"{self.base_url}/api/tools", headers=self.headers)
+ response = requests.get(f"{self.base_url}/admin/tools", headers=self.headers)
return ListToolsResponse(**response.json())
def delete_tool(self, name: str):
- response = requests.delete(f"{self.base_url}/api/tools/{name}", headers=self.headers)
+ response = requests.delete(f"{self.base_url}/admin/tools/{name}", headers=self.headers)
if response.status_code != 200:
raise ValueError(f"Failed to delete tool: {response.text}")
return response.json()
def get_tool(self, name: str):
- response = requests.get(f"{self.base_url}/api/tools/{name}", headers=self.headers)
+ response = requests.get(f"{self.base_url}/admin/tools/{name}", headers=self.headers)
if response.status_code == 404:
return None
elif response.status_code != 200:
diff --git a/memgpt/client/client.py b/memgpt/client/client.py
index b0226cd6..e519a867 100644
--- a/memgpt/client/client.py
+++ b/memgpt/client/client.py
@@ -239,7 +239,6 @@ class RESTClient(AbstractClient):
super().__init__(debug=debug)
self.base_url = base_url
self.headers = {"accept": "application/json", "authorization": f"Bearer {token}"}
- self.token = token
# agents
@@ -696,7 +695,7 @@ class LocalClient(AbstractClient):
if self.auto_save:
self.save()
else:
- return self.interface.to_list()
+ return UserMessageResponse(messages=self.interface.to_list())
def run_command(self, agent_id: str, command: str) -> Union[str, None]:
self.interface.clear()
diff --git a/memgpt/client/utils.py b/memgpt/client/utils.py
new file mode 100644
index 00000000..254a82f1
--- /dev/null
+++ b/memgpt/client/utils.py
@@ -0,0 +1,61 @@
+from datetime import datetime
+
+from IPython.display import HTML, display
+
+
+def pprint(messages):
+ """Utility function for pretty-printing the output of client.send_message in notebooks"""
+
+ css_styles = """
+
+ """
+
+ html_content = css_styles + "
🛠️ [{date_formatted}] Function Return ({return_status}):
" + html_content += f"{return_string}
" + elif "internal_monologue" in message: + html_content += f"💭 [{date_formatted}] Internal Monologue:
" + html_content += f"{message['internal_monologue']}
" + elif "function_call" in message: + html_content += f"🛠️ [[{date_formatted}] Function Call:
" + html_content += f"{message['function_call']}
" + elif "assistant_message" in message: + html_content += f"🤖 [{date_formatted}] Assistant Message:
" + html_content += f"" + html_content += "