docs: fix python examples (#829)

This commit is contained in:
cthomas
2025-01-29 14:13:22 -08:00
committed by GitHub
parent acc59fd296
commit 29feb4c55c
12 changed files with 328 additions and 214 deletions

View File

@@ -19,7 +19,7 @@
"metadata": {},
"source": [
"## Setup a Letta client \n",
"Make sure you run `pip install letta` and `letta quickstart`"
"Make sure you run `pip install letta_client` and start the Letta server with `letta quickstart`"
]
},
{
@@ -29,8 +29,9 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install letta_client\n",
"!pip install letta\n",
"! letta quickstart"
"!letta quickstart"
]
},
{
@@ -40,22 +41,9 @@
"metadata": {},
"outputs": [],
"source": [
"from letta import create_client \n",
"from letta_client import CreateBlock, Letta, MessageCreate \n",
"\n",
"client = create_client() "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9a28e38a-7dbe-4530-8260-202322a8458e",
"metadata": {},
"outputs": [],
"source": [
"from letta import LLMConfig, EmbeddingConfig\n",
"\n",
"client.set_default_llm_config(LLMConfig.default_config(\"gpt-4o-mini\")) \n",
"client.set_default_embedding_config(EmbeddingConfig.default_config(provider=\"openai\")) "
"client = Letta(base_url=\"http://localhost:8283\")"
]
},
{
@@ -92,14 +80,20 @@
"metadata": {},
"outputs": [],
"source": [
"from letta.schemas.memory import ChatMemory\n",
"\n",
"agent_state = client.create_agent(\n",
"agent_state = client.agents.create(\n",
" name=agent_name, \n",
" memory=ChatMemory(\n",
" human=\"My name is Sarah\", \n",
" persona=\"You are a helpful assistant that loves emojis\"\n",
" )\n",
" memory_blocks=[\n",
" CreateBlock(\n",
" label=\"human\",\n",
" value=\"My name is Sarah\",\n",
" ),\n",
" CreateBlock(\n",
" label=\"persona\",\n",
" value=\"You are a helpful assistant that loves emojis\",\n",
" ),\n",
" ]\n",
" model=\"openai/gpt-4o-mini\",\n",
" embedding=\"openai/text-embedding-ada-002\",\n",
")"
]
},
@@ -110,10 +104,14 @@
"metadata": {},
"outputs": [],
"source": [
"response = client.send_message(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" message=\"hello!\", \n",
" role=\"user\" \n",
" messages=[\n",
" MessageCreate(\n",
" role=\"user\", \n",
" content=\"hello!\", \n",
" ),\n",
" ]\n",
")\n",
"response"
]
@@ -123,7 +121,7 @@
"id": "20a5ccf4-addd-4bdb-be80-161f7925dae0",
"metadata": {},
"source": [
"Note that MemGPT agents will generate an *internal_monologue* that explains its actions. You can use this monoloque to understand why agents are behaving as they are. \n",
"Note that MemGPT agents will generate a *reasoning_message* that explains its actions. You can use this monologue to understand why agents are behaving as they are. \n",
"\n",
"Second, MemGPT agents also use tools to communicate, so messages are sent back by calling a `send_message` tool. This makes it easy to allow the agent to communicate over different mediums (e.g. text), and also allows the agent to distinguish between what is and isn't sent to the end user. "
]
@@ -175,7 +173,7 @@
"metadata": {},
"outputs": [],
"source": [
"memory = client.get_core_memory(agent_state.id)"
"memory = client.agents.core_memory.retrieve(agent_id=agent_state.id)"
]
},
{
@@ -195,7 +193,7 @@
"metadata": {},
"outputs": [],
"source": [
"client.get_archival_memory_summary(agent_state.id)"
"client.agents.context.retrieve(agent_id=agent_state.id)[\"num_archival_memory\"]"
]
},
{
@@ -205,7 +203,7 @@
"metadata": {},
"outputs": [],
"source": [
"client.get_recall_memory_summary(agent_state.id)"
"client.agents.context.retrieve(agent_id=agent_state.id)[\"num_recall_memory\"]"
]
},
{
@@ -215,7 +213,7 @@
"metadata": {},
"outputs": [],
"source": [
"client.get_messages(agent_state.id)"
"client.agents.messages.list(agent_id=agent_state.id)"
]
},
{
@@ -243,11 +241,15 @@
"metadata": {},
"outputs": [],
"source": [
"response = client.send_message(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" message = \"My name is actually Bob\", \n",
" role = \"user\"\n",
") \n",
" messages=[\n",
" MessageCreate(\n",
" role=\"user\", \n",
" content=\"My name is actually Bob\", \n",
" ),\n",
" ]\n",
")\n",
"response"
]
},
@@ -258,7 +260,7 @@
"metadata": {},
"outputs": [],
"source": [
"client.get_core_memory(agent_state.id)"
"client.agents.core_memory.retrieve(agent_id=agent_state.id)"
]
},
{
@@ -277,11 +279,15 @@
"metadata": {},
"outputs": [],
"source": [
"response = client.send_message(\n",
" agent_id=agent_state.id, \n",
" message = \"In the future, never use emojis to communicate\", \n",
" role = \"user\"\n",
") \n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id,\n",
" messages=[\n",
" MessageCreate(\n",
" role=\"user\", \n",
" content=\"In the future, never use emojis to communicate\", \n",
" ),\n",
" ]\n",
")\n",
"response"
]
},
@@ -292,7 +298,7 @@
"metadata": {},
"outputs": [],
"source": [
"client.get_core_memory(agent_state.id).get_block('persona')"
"client.agents.core_memory.retrieve_block(agent_id=agent_state.id, block_label='persona')"
]
},
{
@@ -311,7 +317,7 @@
"metadata": {},
"outputs": [],
"source": [
"client.get_archival_memory(agent_state.id)"
"client.agents.archival_memory.list(agent_id=agent_state.id)"
]
},
{
@@ -321,7 +327,7 @@
"metadata": {},
"outputs": [],
"source": [
"client.get_archival_memory_summary(agent_state.id)"
"client.agents.context.retrieve(agent_id=agent_state.id)[\"num_archival_memory\"]"
]
},
{
@@ -339,11 +345,15 @@
"metadata": {},
"outputs": [],
"source": [
"response = client.send_message(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" message = \"Save the information that 'bob loves cats' to archival\", \n",
" role = \"user\"\n",
") \n",
" messages=[\n",
" MessageCreate(\n",
" role=\"user\", \n",
" content=\"Save the information that 'bob loves cats' to archival\", \n",
" ),\n",
" ]\n",
")\n",
"response"
]
},
@@ -354,7 +364,7 @@
"metadata": {},
"outputs": [],
"source": [
"client.get_archival_memory(agent_state.id)[0].text"
"client.agents.archival_memory.list(agent_id=agent_state.id)[0].text"
]
},
{
@@ -372,9 +382,9 @@
"metadata": {},
"outputs": [],
"source": [
"client.insert_archival_memory(\n",
" agent_state.id, \n",
" \"Bob's loves boston terriers\"\n",
"client.agents.archival_memory.create(\n",
" agent_id=agent_state.id, \n",
" text=\"Bob's loves boston terriers\"\n",
")"
]
},
@@ -393,21 +403,17 @@
"metadata": {},
"outputs": [],
"source": [
"response = client.send_message(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" role=\"user\", \n",
" message=\"What animals do I like? Search archival.\"\n",
" messages=[\n",
" MessageCreate(\n",
" role=\"user\", \n",
" content=\"What animals do I like? Search archival.\", \n",
" ),\n",
" ]\n",
")\n",
"response"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "adc394c8-1d88-42bf-a6a5-b01f20f78d81",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

View File

@@ -41,7 +41,7 @@ agent_state = client.agents.create(
print(f"Created agent with name {agent_state.name} and unique ID {agent_state.id}")
# message an agent as a user
response = client.agents.messages.send(
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
MessageCreate(
@@ -54,7 +54,7 @@ print("Usage", response.usage)
print("Agent messages", response.messages)
# message a system message (non-user)
response = client.agents.messages.send(
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
MessageCreate(

View File

@@ -24,7 +24,7 @@ agent_state = client.agents.create(
print(f"Created agent with name {agent_state.name} and unique ID {agent_state.id}")
# Message an agent
response = client.agents.messages.send(
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
MessageCreate(
@@ -40,7 +40,7 @@ print("Agent messages", response.messages)
agents = client.agents.list()
# get the agent by ID
agent_state = client.agents.get(agent_id=agent_state.id)
agent_state = client.agents.retrieve(agent_id=agent_state.id)
# get the agent by name
agent_state = client.agents.list(name=agent_state.name)[0]

115
examples/docs/example.py Normal file
View File

@@ -0,0 +1,115 @@
# End-to-end walkthrough of the letta_client SDK: create an agent with core
# memory, exchange messages, attach a custom tool, and share a memory block
# between two agents. Requires a running Letta server (see note below).
from letta_client import CreateBlock, Letta, MessageCreate

"""
Make sure you run the Letta server before running this example.
```
letta server
```
Execute this script using `poetry run python3 example.py`
"""

# Connect to a Letta server running locally on its default port.
client = Letta(
    base_url="http://localhost:8283",
)

# Create an agent with a single "human" core-memory block so it knows
# who it is talking to.
agent = client.agents.create(
    memory_blocks=[
        CreateBlock(
            value="Name: Caren",
            label="human",
        ),
    ],
    model="openai/gpt-4o-mini",
    embedding="openai/text-embedding-ada-002",
)
print(f"Created agent with name {agent.name}")

# Send a user message; the agent should answer from its core memory.
message_text = "What's my name?"
response = client.agents.messages.create(
    agent_id=agent.id,
    messages=[
        MessageCreate(
            role="user",
            content=message_text,
        ),
    ],
)
print(f"Sent message to agent {agent.name}: {message_text}")
# NOTE(review): indexing assumes the response is [reasoning message,
# assistant message] in that order — confirm against the server version in use.
print(f"Agent thoughts: {response.messages[0].reasoning}")
print(f"Agent response: {response.messages[1].content}")


def secret_message():
    """Return a secret message."""
    return "Hello world!"


# Register the plain Python function as a tool and attach it to the agent.
tool = client.tools.upsert_from_function(
    func=secret_message,
)
client.agents.tools.attach(agent_id=agent.id, tool_id=tool.id)
print(f"Created tool {tool.name} and attached to agent {agent.name}")

# Ask the agent to invoke the newly attached tool.
message_text = "Run secret message tool and tell me what it returns"
response = client.agents.messages.create(
    agent_id=agent.id,
    messages=[
        MessageCreate(
            role="user",
            content=message_text,
        ),
    ],
)
print(f"Sent message to agent {agent.name}: {message_text}")
# NOTE(review): assumes a five-message sequence (reasoning, tool call,
# tool return, reasoning, assistant reply) — verify for your server version.
print(f"Agent thoughts: {response.messages[0].reasoning}")
print(f"Tool call information: {response.messages[1].tool_call}")
print(f"Tool response information: {response.messages[2].status}")
print(f"Agent thoughts: {response.messages[3].reasoning}")
print(f"Agent response: {response.messages[4].content}")

# Create a second agent and attach the FIRST agent's "human" block to it,
# so both agents share the same core-memory block.
agent_copy = client.agents.create(
    model="openai/gpt-4o-mini",
    embedding="openai/text-embedding-ada-002",
)
block = client.agents.core_memory.retrieve_block(agent.id, "human")
agent_copy = client.agents.core_memory.attach_block(agent_copy.id, block.id)
print(f"Created agent copy with shared memory named {agent_copy.name}")

# Tell the copy to rewrite the shared block via core_memory_replace.
message_text = "My name isn't Caren, it's Sarah. Please update your core memory with core_memory_replace"
response = client.agents.messages.create(
    agent_id=agent_copy.id,
    messages=[
        MessageCreate(
            role="user",
            content=message_text,
        ),
    ],
)
print(f"Sent message to agent {agent_copy.name}: {message_text}")
# Re-read the block to show the edit took effect.
block = client.agents.core_memory.retrieve_block(agent_copy.id, "human")
print(f"New core memory for agent {agent_copy.name}: {block.value}")

# The copy should now answer with the updated name from the shared block.
message_text = "What's my name?"
response = client.agents.messages.create(
    agent_id=agent_copy.id,
    messages=[
        MessageCreate(
            role="user",
            content=message_text,
        ),
    ],
)
print(f"Sent message to agent {agent_copy.name}: {message_text}")
print(f"Agent thoughts: {response.messages[0].reasoning}")
print(f"Agent response: {response.messages[1].content}")

# Clean up both agents on the server.
client.agents.delete(agent_id=agent.id)
client.agents.delete(agent_id=agent_copy.id)
print(f"Deleted agents {agent.name} and {agent_copy.name}")

View File

@@ -6,7 +6,13 @@ import {
ToolReturnMessage,
} from '@letta-ai/letta-client/api/types';
// Start letta server and run `npm run example`
/**
* Make sure you run the Letta server before running this example.
* ```
* letta server
* ```
* Execute this script using `npm run example`
*/
const client = new LettaClient({
baseUrl: 'http://localhost:8283',
});
@@ -56,9 +62,7 @@ const tool = await client.tools.upsert({
await client.agents.tools.attach(agent.id, tool.id!);
console.log(
`Created tool with name ${tool.name} and attached to agent ${agent.name}`,
);
console.log(`Created tool ${tool.name} and attached to agent ${agent.name}`);
messageText = 'Run secret message tool and tell me what it returns';
response = await client.agents.messages.create(agent.id, {
@@ -70,21 +74,21 @@ response = await client.agents.messages.create(agent.id, {
],
});
console.log('Sent message to agent:', messageText);
console.log(`Sent message to agent ${agent.name}: ${messageText}`);
console.log(
'Agent thoughts',
'Agent thoughts:',
(response.messages[0] as ReasoningMessage).reasoning,
);
console.log(
'Tool call information',
'Tool call information:',
(response.messages[1] as ToolCallMessage).toolCall,
);
console.log(
'Tool response information',
'Tool response information:',
(response.messages[2] as ToolReturnMessage).status,
);
console.log(
'Agent thoughts',
'Agent thoughts:',
(response.messages[3] as ReasoningMessage).reasoning,
);
console.log(
@@ -103,8 +107,6 @@ console.log('Created agent copy with shared memory named', agentCopy.name);
messageText =
"My name isn't Caren, it's Sarah. Please update your core memory with core_memory_replace";
console.log(`Sent message to agent ${agentCopy.name}: ${messageText}`);
response = await client.agents.messages.create(agentCopy.id, {
messages: [
{
@@ -114,6 +116,8 @@ response = await client.agents.messages.create(agentCopy.id, {
],
});
console.log(`Sent message to agent ${agentCopy.name}: ${messageText}`);
block = await client.agents.coreMemory.retrieveBlock(agentCopy.id, 'human');
console.log(`New core memory for agent ${agentCopy.name}: ${block.value}`);
@@ -136,3 +140,8 @@ console.log(
'Agent response:',
(response.messages[1] as AssistantMessage).content,
);
await client.agents.delete(agent.id);
await client.agents.delete(agentCopy.id);
console.log(`Deleted agents ${agent.name} and ${agentCopy.name}`);

View File

@@ -38,7 +38,7 @@ def main():
# Send a message to the agent
print(f"Created agent: {agent_state.name} with ID {str(agent_state.id)}")
response = client.agents.messages.send(
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
MessageCreate(

View File

@@ -33,7 +33,7 @@ def roll_d20() -> str:
# create a tool from the function
tool = client.tools.upsert_from_function(func=roll_d20, name="roll_d20")
tool = client.tools.upsert_from_function(func=roll_d20)
print(f"Created tool with name {tool.name}")
# create a new agent
@@ -59,7 +59,7 @@ agent_state = client.agents.create(
print(f"Created agent with name {agent_state.name} with tools {[t.name for t in agent_state.tools]}")
# Message an agent
response = client.agents.messages.send(
response = client.agents.messages.create(
agent_id=agent_state.id,
messages=[
MessageCreate(
@@ -72,15 +72,15 @@ print("Usage", response.usage)
print("Agent messages", response.messages)
# remove a tool from the agent
client.agents.tools.remove(agent_id=agent_state.id, tool_id=tool.id)
client.agents.tools.detach(agent_id=agent_state.id, tool_id=tool.id)
# add a tool to the agent
client.agents.tools.add(agent_id=agent_state.id, tool_id=tool.id)
client.agents.tools.attach(agent_id=agent_state.id, tool_id=tool.id)
client.agents.delete(agent_id=agent_state.id)
# create an agent with only a subset of default tools
send_message_tool = client.tools.get_by_name(tool_name="send_message")
send_message_tool = [t for t in client.tools.list() if t.name == "send_message"][0]
agent_state = client.agents.create(
memory_blocks=[
CreateBlock(
@@ -91,11 +91,11 @@ agent_state = client.agents.create(
model="openai/gpt-4o-mini",
embedding="openai/text-embedding-ada-002",
include_base_tools=False,
tool_ids=[tool.id, send_message_tool],
tool_ids=[tool.id, send_message_tool.id],
)
# message the agent to search archival memory (will be unable to do so)
client.agents.messages.send(
client.agents.messages.create(
agent_id=agent_state.id,
messages=[
MessageCreate(

View File

@@ -141,7 +141,6 @@
"client.sources.attach(\n",
" source_id=source.id,\n",
" agent_id=agent_state.id\n",
" \n",
")"
]
},
@@ -241,7 +240,7 @@
}
],
"source": [
"response = client.agents.messages.send(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id,\n",
" messages=[\n",
" MessageCreate(\n",
@@ -408,7 +407,7 @@
"metadata": {},
"outputs": [],
"source": [
"agent_state = client.create_agent(\n",
"agent_state = client.agents.create(\n",
" name=\"birthday_agent\", \n",
" tool_ids=[birthday_tool.id],\n",
" memory_blocks=[\n",
@@ -523,7 +522,7 @@
}
],
"source": [
"response = client.agents.messages.send(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id,\n",
" messages=[\n",
" MessageCreate(\n",
@@ -680,7 +679,7 @@
"\n",
"\"\"\"\n",
"\n",
"agent_state = client.create_agent(\n",
"agent_state = client.agents.create(\n",
" name=\"search_agent\", \n",
" memory_blocks=[\n",
" CreateBlock(\n",
@@ -809,7 +808,7 @@
}
],
"source": [
"response = client.agents.messages.send(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" messages=[\n",
" MessageCreate(\n",
@@ -839,7 +838,7 @@
"from letta.schemas.llm_config import LLMConfig\n",
"\n",
"\n",
"agent_state = client.create_agent(\n",
"agent_state = client.agents.create(\n",
" name=\"search_agent\", \n",
" memory_blocks=[\n",
" CreateBlock(\n",
@@ -958,14 +957,6 @@
")\n",
"response"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "91192bb7-4a74-4c94-a485-883d930b0489",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

View File

@@ -248,7 +248,7 @@
"id": "fbdc9b6e-8bd5-4c42-970e-473da4adb2f2",
"metadata": {},
"source": [
"### Defining a memory module\n"
"### Defining task related tools\n"
]
},
{
@@ -258,56 +258,46 @@
"metadata": {},
"outputs": [],
"source": [
"from letta import ChatMemory, Block \n",
"from typing import Optional, List\n",
"import json\n",
"\n",
"class TaskMemory(ChatMemory): \n",
"def task_queue_push(self: \"Agent\", task_description: str):\n",
" \"\"\"\n",
" Push to a task queue stored in core memory. \n",
"\n",
" def __init__(self, human: str, persona: str, tasks: List[str]): \n",
" super().__init__(human=human, persona=persona, limit=2000) \n",
" self.link_block( \n",
" Block(\n",
" limit=2000, \n",
" value=json.dumps(tasks), \n",
" label=\"tasks\"\n",
" )\n",
" )\n",
" Args:\n",
" task_description (str): A description of the next task you must accomplish. \n",
" \n",
" Returns:\n",
" Optional[str]: None is always returned as this function \n",
" does not produce a response.\n",
" \"\"\"\n",
" import json\n",
" tasks = json.loads(self.memory.get_block(\"tasks\").value)\n",
" tasks.append(task_description)\n",
" self.memory.update_block_value(\"tasks\", json.dumps(tasks))\n",
" return None\n",
"\n",
" def task_queue_push(self: \"Agent\", task_description: str):\n",
" \"\"\"\n",
" Push to a task queue stored in core memory. \n",
"def task_queue_pop(self: \"Agent\"):\n",
" \"\"\"\n",
" Get the next task from the task queue \n",
"\n",
" Args:\n",
" task_description (str): A description of the next task you must accomplish. \n",
" \n",
" Returns:\n",
" Optional[str]: None is always returned as this function \n",
" does not produce a response.\n",
" \"\"\"\n",
" import json\n",
" tasks = json.loads(self.memory.get_block(\"tasks\").value)\n",
" tasks.append(task_description)\n",
" self.memory.update_block_value(\"tasks\", json.dumps(tasks))\n",
" Returns:\n",
" Optional[str]: The description of the task popped from the \n",
" queue, if there are still tasks in queue. Otherwise, returns\n",
" None (the task queue is empty)\n",
" \"\"\"\n",
" import json\n",
" tasks = json.loads(self.memory.get_block(\"tasks\").value)\n",
" if len(tasks) == 0: \n",
" return None\n",
" task = tasks[0]\n",
" print(\"CURRENT TASKS: \", tasks)\n",
" self.memory.update_block_value(\"tasks\", json.dumps(tasks[1:]))\n",
" return task\n",
"\n",
" def task_queue_pop(self: \"Agent\"):\n",
" \"\"\"\n",
" Get the next task from the task queue \n",
" \n",
" Returns:\n",
" Optional[str]: The description of the task popped from the \n",
" queue, if there are still tasks in queue. Otherwise, returns\n",
" None (the task queue is empty)\n",
" \"\"\"\n",
" import json\n",
" tasks = json.loads(self.memory.get_block(\"tasks\").value)\n",
" if len(tasks) == 0: \n",
" return None\n",
" task = tasks[0]\n",
" print(\"CURRENT TASKS: \", tasks)\n",
" self.memory.update_block_value(\"tasks\", json.dumps(tasks[1:]))\n",
" return task\n"
"push_task_tool = client.tools.upsert_from_function(func=task_queue_push)\n",
"pop_task_tool = client.tools.upsert_from_function(func=task_queue_pop)"
]
},
{
@@ -328,17 +318,28 @@
"task_agent_name = \"task_agent\"\n",
"\n",
"# delete agent if exists \n",
"if client.get_agent_id(task_agent_name): \n",
" client.delete_agent(client.get_agent_id(task_agent_name))\n",
"agents = client.agents.list(name=task_agent_name)\n",
"if len(agents) > 0: \n",
" client.agents.delete(agent_id=agents[0].id)\n",
"\n",
"task_agent_state = client.create_agent(\n",
"task_agent_state = client.agents.create(\n",
" name=task_agent_name, \n",
" system = open(\"data/task_queue_system_prompt.txt\", \"r\").read(),\n",
" memory=TaskMemory(\n",
" human=\"My name is Sarah\", \n",
" persona=\"You are an agent that must clear its tasks.\", \n",
" tasks=[]\n",
" )\n",
" memory_blocks=[\n",
" CreateBlock(\n",
" label=\"human\",\n",
" value=\"My name is Sarah\",\n",
" ),\n",
" CreateBlock(\n",
" label=\"persona\",\n",
" value=\"You are an agent that must clear its tasks.\",\n",
" ),\n",
" CreateBlock(\n",
" label=\"tasks\",\n",
" value=\"\",\n",
" ),\n",
" ],\n",
" tool_ids=[push_task_tool.id, pop_task_tool.id],\n",
")"
]
},
@@ -491,10 +492,14 @@
}
],
"source": [
"response = client.send_message(\n",
"response = client.agents.messages.create(\n",
" agent_id=task_agent_state.id, \n",
" role=\"user\", \n",
" message=\"Add 'start calling me Charles' and 'tell me a haiku about my name' as two separate tasks.\"\n",
" messages=[\n",
" MessageCreate(\n",
" role=\"user\",\n",
" content=\"Add 'start calling me Charles' and 'tell me a haiku about my name' as two separate tasks.\",\n",
" )\n",
" ],\n",
")\n",
"response"
]
@@ -580,10 +585,14 @@
}
],
"source": [
"response = client.send_message(\n",
"response = client.agents.messages.create(\n",
" agent_id=task_agent_state.id, \n",
" role=\"user\", \n",
" message=\"complete your tasks\"\n",
" messages=[\n",
" MessageCreate(\n",
" role=\"user\",\n",
" content=\"complete your tasks\",\n",
" )\n",
" ],\n",
")\n",
"response"
]
@@ -669,10 +678,14 @@
}
],
"source": [
"response = client.send_message(\n",
"response = client.agents.messages.create(\n",
" agent_id=task_agent_state.id, \n",
" role=\"user\", \n",
" message=\"keep going\"\\\n",
" messages=[\n",
" MessageCreate(\n",
" role=\"user\",\n",
" content=\"keep going\",\n",
" )\n",
" ],\n",
")\n",
"response"
]
@@ -695,16 +708,8 @@
}
],
"source": [
"client.get_in_context_memory(task_agent_state.id).get_block(\"tasks\")"
"client.agents.core_memory.retrieve_block(agent_id=task_agent_state.id, block_label=\"tasks\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bfb41f81-26e0-4bb7-8a49-b90a2e8b9ec6",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

View File

@@ -7,6 +7,7 @@
"source": [
"# Introduction to Letta\n",
"> Make sure you run the Letta server before running this example using `letta server`\n",
"\n",
"This lab will go over: \n",
"1. Creating an agent with Letta\n",
"2. Understand Letta agent state (messages, memories, tools)\n",
@@ -68,7 +69,7 @@
"metadata": {},
"outputs": [],
"source": [
"agent_state = client.create_agent(\n",
"agent_state = client.agents.create(\n",
" name=agent_name, \n",
" memory_blocks=[\n",
" CreateBlock(\n",
@@ -164,7 +165,7 @@
}
],
"source": [
"response = client.agents.messages.send(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" messages=[\n",
" MessageCreate(\n",
@@ -315,7 +316,7 @@
"metadata": {},
"outputs": [],
"source": [
"memory = client.agents.core_memory.get_blocks(agent_id=agent_state.id)"
"memory = client.agents.core_memory.retrieve(agent_id=agent_state.id)"
]
},
{
@@ -357,7 +358,7 @@
}
],
"source": [
"client.agents.archival_memory.get_summary(agent_id=agent_state.id)"
"client.agents.context.retrieve(agent_id=agent_state.id)[\"num_archival_memory\"]"
]
},
{
@@ -378,7 +379,7 @@
}
],
"source": [
"client.agents.recall_memory.get_summary(agent_id=agent_state.id)"
"client.agents.context.retrieve(agent_id=agent_state.id)[\"num_recall_memory\"]"
]
},
{
@@ -524,7 +525,7 @@
}
],
"source": [
"response = client.agents.messages.send(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" messages=[\n",
" MessageCreate(\n",
@@ -554,7 +555,7 @@
}
],
"source": [
"client.agents.core_memory.get_blocks(agent_id=agent_state.id)"
"client.agents.core_memory.retrieve(agent_id=agent_state.id)"
]
},
{
@@ -677,7 +678,7 @@
}
],
"source": [
"response = client.agents.messages.send(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" messages=[\n",
" MessageCreate(\n",
@@ -707,7 +708,7 @@
}
],
"source": [
"client.agents.core_memory.get_block(agent_id=agent_state.id, block_label='persona')"
"client.agents.core_memory.retrieve_block(agent_id=agent_state.id, block_label='persona')"
]
},
{
@@ -758,7 +759,7 @@
}
],
"source": [
"client.agents.archival_memory.get_summary(agent_id=agent_state.id)"
"client.agents.context.retrieve(agent_id=agent_state.id)[\"num_archival_memory\"]"
]
},
{
@@ -865,7 +866,7 @@
}
],
"source": [
"response = client.agents.messages.send(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" messages=[\n",
" MessageCreate(\n",
@@ -1034,7 +1035,7 @@
}
],
"source": [
"response = client.agents.messages.send(\n",
"response = client.agents.messages.create(\n",
" agent_id=agent_state.id, \n",
" messages=[\n",
" MessageCreate(\n",
@@ -1045,14 +1046,6 @@
")\n",
"response"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7c9b39df-d4ca-4d12-a6c4-cf3d0efa9738",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

View File

@@ -123,7 +123,6 @@
" \"\"\"\n",
" import os\n",
" filepath = os.path.join(\"data\", \"resumes\", name.lower().replace(\" \", \"_\") + \".txt\")\n",
" #print(\"read\", filepath)\n",
" return open(filepath).read()\n",
"\n",
"def submit_evaluation(self, candidate_name: str, reach_out: bool, resume: str, justification: str): \n",
@@ -154,8 +153,8 @@
"\n",
"# TODO: add an archival candidate tool (provide justification) \n",
"\n",
"read_resume_tool = client.tools.upsert_from_function(name=\"read_resume\", func=read_resume) \n",
"submit_evaluation_tool = client.tools.upsert_from_function(name=\"submit_evaluation\", func=submit_evaluation)"
"read_resume_tool = client.tools.upsert_from_function(func=read_resume) \n",
"submit_evaluation_tool = client.tools.upsert_from_function(func=submit_evaluation)"
]
},
{
@@ -213,7 +212,7 @@
" print(\"Pretend to email:\", content)\n",
" return\n",
"\n",
"email_candidate_tool = client.tools.upsert_from_function(name=\"email_candidate\", func=email_candidate)"
"email_candidate_tool = client.tools.upsert_from_function(func=email_candidate)"
]
},
{
@@ -668,7 +667,7 @@
}
],
"source": [
"client.get_block(org_block.id)"
"client.blocks.retrieve(block_id=org_block.id)"
]
},
{
@@ -718,8 +717,8 @@
"\n",
"\n",
"# create tools \n",
"search_candidate_tool = client.tools.upsert_from_function(name=\"search_candidates_db\", func=search_candidates_db)\n",
"consider_candidate_tool = client.tools.upsert_from_function(name=\"consider_candidate\", func=consider_candidate)\n",
"search_candidate_tool = client.tools.upsert_from_function(func=search_candidates_db)\n",
"consider_candidate_tool = client.tools.upsert_from_function(func=consider_candidate)\n",
"\n",
"# create recruiter agent\n",
"recruiter_agent = client.agents.create(\n",
@@ -855,18 +854,9 @@
"metadata": {},
"outputs": [],
"source": [
"client.agents.delete(eval_agent.id)\n",
"client.agents.delete(outreach_agent.id)"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "672f941e-af17-4b5c-8a21-925a1d88c47f",
"metadata": {},
"outputs": [],
"source": [
"client.agents.delete(recruiter_agent.id)"
"client.agents.delete(agent_id=eval_agent.id)\n",
"client.agents.delete(agent_id=outreach_agent.id)\n",
"client.agents.delete(agent_id=recruiter_agent.id)"
]
}
],

View File

@@ -49,7 +49,7 @@
"metadata": {},
"outputs": [],
"source": [
"letta_paper = client.create_source(\n",
"letta_paper = client.sources.create(\n",
" name=\"letta_paper\", \n",
")"
]
@@ -69,7 +69,7 @@
"metadata": {},
"outputs": [],
"source": [
"job = client.load_file_to_source(filename=filename, source_id=letta_paper.id)\n",
"job = client.sources.files.upload(filename=filename, source_id=letta_paper.id)\n",
"job"
]
},
@@ -89,14 +89,19 @@
"metadata": {},
"outputs": [],
"source": [
"client.attach_source_to_agent(source_id=letta_paper.id, agent_id=basic_agent.id)\n",
"client.agents.sources.attach(source_id=letta_paper.id, agent_id=basic_agent.id)\n",
"# TODO: add system message saying that file has been attached \n",
"\n",
"from pprint import pprint\n",
"\n",
"# TODO: do something Accenture related \n",
"# TODO: brag about query rewriting -- hyde paper \n",
"response = client.user_message(agent_id=basic_agent.id, message=\"what is core memory? search your archival memory.\") \n",
"response = client.agents.messages.create(agent_id=basic_agent.id, messages=[\n",
" MessageCreate(\n",
" role=\"user\",\n",
" content=\"what is core memory? search your archival memory.\",\n",
" )\n",
"])\n",
"pprint(response.messages)"
]
}