feat: add e2e example scripts for documentation (#1995)

This commit is contained in:
Sarah Wooders
2024-11-06 17:39:51 -08:00
committed by GitHub
parent f1ff70a18f
commit f88a0cbfa6
11 changed files with 254 additions and 83 deletions

69
.github/workflows/test_examples.yml vendored Normal file
View File

@@ -0,0 +1,69 @@
# CI workflow: runs the documentation example scripts end-to-end against a
# dockerized Letta dev server on every push / pull request to main.
name: Examples (documentation)

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      # The server writes logs under ~/.letta/logs; make sure the runner
      # user owns that directory before docker mounts it.
      - name: Set permissions for log directory
        run: |
          mkdir -p /home/runner/.letta/logs
          sudo chown -R $USER:$USER /home/runner/.letta/logs
          chmod -R 755 /home/runner/.letta/logs

      - name: Build and run docker dev server
        env:
          LETTA_PG_DB: letta
          LETTA_PG_USER: letta
          LETTA_PG_PASSWORD: letta
          LETTA_PG_PORT: 8888
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: docker compose -f dev-compose.yaml up --build -d

      #- name: "Setup Python, Poetry and Dependencies"
      #  uses: packetcoders/action-setup-cache-python-poetry@v1.2.0
      #  with:
      #    python-version: "3.12"
      #    poetry-version: "1.8.2"
      #    install-args: "--all-extras"

      # Block until the dev server answers on its default port.
      - name: Wait for service
        run: bash scripts/wait_for_service.sh http://localhost:8283 -- echo "Service is ready"

      - name: Run tests with pytest
        env:
          LETTA_PG_DB: letta
          LETTA_PG_USER: letta
          LETTA_PG_PASSWORD: letta
          LETTA_PG_PORT: 8888
          LETTA_SERVER_PASS: test_server_token
          LETTA_SERVER_URL: http://localhost:8283
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          PYTHONPATH: ${{ github.workspace }}:${{ env.PYTHONPATH }}
        run: |
          pipx install poetry==1.8.2
          poetry install -E dev -E postgres -E external-tools
          poetry run python examples/docs/agent_advanced.py
          poetry run python examples/docs/agent_basic.py
          poetry run python examples/docs/memory.py
          poetry run python examples/docs/rest_client.py
          poetry run python examples/docs/tools.py

      - name: Print docker logs if tests fail
        if: failure()
        run: |
          echo "Printing Docker Logs..."
          docker compose -f dev-compose.yaml logs

View File

@@ -11,6 +11,10 @@ This example shows how you can add CrewAI tools.
First, make sure you have CrewAI and some of the extras downloaded.
```
# from pypi
pip install 'letta[external-tools]'
# from source
poetry install --extras "external-tools"
```
then setup letta with `letta configure`.

View File

@@ -0,0 +1,40 @@
"""Docs example: create an agent with explicit LLM/embedding configs,
exchange user and system messages, then clean up."""
from letta import ChatMemory, EmbeddingConfig, LLMConfig, create_client
from letta.prompts import gpt_system

client = create_client()

# Explicit model and embedding configuration (OpenAI endpoints).
llm = LLMConfig(
    model="gpt-4",
    model_endpoint_type="openai",
    model_endpoint="https://api.openai.com/v1",
    context_window=8000,
)
embedding = EmbeddingConfig(
    embedding_endpoint_type="openai",
    embedding_endpoint="https://api.openai.com/v1",
    embedding_model="text-embedding-ada-002",
    embedding_dim=1536,
    embedding_chunk_size=300,
)

# create a new agent with a custom memory block and system prompt
agent = client.create_agent(
    name="agent_name",
    memory=ChatMemory(human="Name: Sarah", persona="You are a helpful assistant that loves emojis"),
    llm_config=llm,
    embedding_config=embedding,
    system=gpt_system.get_system_text("memgpt_chat"),
    tools=[],
    include_base_tools=True,
)
print(f"Created agent with name {agent.name} and unique ID {agent.id}")

# message the agent as a user
reply = client.send_message(agent_id=agent.id, role="user", message="hello")
print("Usage", reply.usage)
print("Agent messages", reply.messages)

# message the agent with a system-role (non-user) message
reply = client.send_message(
    agent_id=agent.id, role="system", message="[system] user has logged in. send a friendly message."
)
print("Usage", reply.usage)
print("Agent messages", reply.messages)

# delete the agent
client.delete_agent(agent_id=agent.id)

View File

@@ -0,0 +1,40 @@
"""Docs example: agent lifecycle basics — create, message, look up, delete."""
from letta import EmbeddingConfig, LLMConfig, create_client

client = create_client()

# Register defaults so later create_agent() calls need no explicit configs.
client.set_default_llm_config(
    LLMConfig(model="gpt-4o-mini", model_endpoint_type="openai", model_endpoint="https://api.openai.com/v1", context_window=128000)
)
client.set_default_embedding_config(
    EmbeddingConfig(
        embedding_endpoint_type="openai",
        embedding_endpoint="https://api.openai.com/v1",
        embedding_model="text-embedding-ada-002",
        embedding_dim=1536,
        embedding_chunk_size=300,
    )
)

# create a new agent (uses the defaults registered above)
agent = client.create_agent()
print(f"Created agent with name {agent.name} and unique ID {agent.id}")

# message the agent
reply = client.send_message(agent_id=agent.id, role="user", message="hello")
print("Usage", reply.usage)
print("Agent messages", reply.messages)

# list all agents
all_agents = client.list_agents()

# look the agent up by ID
agent = client.get_agent(agent_id=agent.id)

# look the agent up by name (name -> ID -> state)
found_id = client.get_agent_id(agent_name=agent.name)
agent = client.get_agent(agent_id=found_id)

# delete the agent
client.delete_agent(agent_id=agent.id)

0
examples/docs/memory.py Normal file
View File

View File

@@ -1,5 +1,3 @@
import json
from letta import create_client
from letta.schemas.memory import ChatMemory
@@ -15,14 +13,25 @@ def main():
# Connect to the server as a user
client = create_client(base_url="http://localhost:8283")
# list available configs on the server
llm_configs = client.list_llm_configs()
print(f"Available LLM configs: {llm_configs}")
embedding_configs = client.list_embedding_configs()
print(f"Available embedding configs: {embedding_configs}")
# Create an agent
agent_state = client.create_agent(name="my_agent", memory=ChatMemory(human="My name is Sarah.", persona="I am a friendly AI."))
agent_state = client.create_agent(
name="my_agent",
memory=ChatMemory(human="My name is Sarah.", persona="I am a friendly AI."),
embedding_config=embedding_configs[0],
llm_config=llm_configs[0],
)
print(f"Created agent: {agent_state.name} with ID {str(agent_state.id)}")
# Send a message to the agent
print(f"Created agent: {agent_state.name} with ID {str(agent_state.id)}")
send_message_response = client.user_message(agent_id=agent_state.id, message="Whats my name?")
print(f"Recieved response: \n{json.dumps(send_message_response.messages, indent=4)}")
response = client.user_message(agent_id=agent_state.id, message="Whats my name?")
print(f"Recieved response:", response.messages)
# Delete agent
client.delete_agent(agent_id=agent_state.id)

68
examples/docs/tools.py Normal file
View File

@@ -0,0 +1,68 @@
"""Docs example: create a custom tool and attach/detach it on agents."""
from letta import EmbeddingConfig, LLMConfig, create_client

client = create_client()

# Register default model/embedding configs so create_agent() calls below
# need no explicit configuration.
default_llm = LLMConfig(
    model="gpt-4",
    model_endpoint_type="openai",
    model_endpoint="https://api.openai.com/v1",
    context_window=8000,
)
client.set_default_llm_config(default_llm)

default_embedding = EmbeddingConfig(
    embedding_endpoint_type="openai",
    embedding_endpoint="https://api.openai.com/v1",
    embedding_model="text-embedding-ada-002",
    embedding_dim=1536,
    embedding_chunk_size=300,
)
client.set_default_embedding_config(default_embedding)
# define a function with a docstring
def roll_d20() -> str:
    """
    Simulate the roll of a 20-sided die (d20).

    This function generates a random integer between 1 and 20, inclusive,
    which represents the outcome of a single roll of a d20.

    Returns:
        str: A message reporting the die roll, e.g. "You rolled a 15".

    Example:
        >>> roll_d20()
        'You rolled a 15'  # example output; varies each call
    """
    import random

    dice_roll_outcome = random.randint(1, 20)
    return f"You rolled a {dice_roll_outcome}"
# register the function as a tool under an explicit name
dice_tool = client.create_tool(roll_d20, name="roll_dice")

# create a new agent that has the tool attached
agent = client.create_agent(tools=[dice_tool.name])
print(f"Created agent with name {agent.name} with tools {agent.tools}")

# ask the agent to use the tool
reply = client.send_message(agent_id=agent.id, role="user", message="roll a dice")
print("Usage", reply.usage)
print("Agent messages", reply.messages)

# detach the tool from the agent, then re-attach it, then clean up
client.remove_tool_from_agent(agent_id=agent.id, tool_id=dice_tool.id)
client.add_tool_to_agent(agent_id=agent.id, tool_id=dice_tool.id)
client.delete_agent(agent_id=agent.id)

# create an agent with only a subset of default tools
agent = client.create_agent(include_base_tools=False, tools=[dice_tool.name, "send_message"])

# ask it to search archival memory (no archival tools attached, so it cannot)
reply = client.send_message(agent_id=agent.id, role="user", message="search your archival memory")
print("Usage", reply.usage)
print("Agent messages", reply.messages)
client.delete_agent(agent_id=agent.id)

View File

@@ -1,27 +0,0 @@
import json

from letta import create_client
from letta.memory import ChatMemory


def main():
    """Create a `LocalClient` agent, ask it a question, then delete it."""
    # Create a `LocalClient` (in-process, no server required)
    client = create_client()

    # Create an agent with a custom human/persona memory block
    agent_state = client.create_agent(name="my_agent", memory=ChatMemory(human="My name is Sarah.", persona="I am a friendly AI."))
    print(f"Created agent: {agent_state.name} with ID {str(agent_state.id)}")

    # Send a message to the agent
    send_message_response = client.user_message(agent_id=agent_state.id, message="Whats my name?")
    print(f"Received response: \n{json.dumps(send_message_response.messages, indent=4)}")

    # Delete agent
    client.delete_agent(agent_id=agent_state.id)
    print(f"Deleted agent: {agent_state.name} with ID {str(agent_state.id)}")


if __name__ == "__main__":
    main()

View File

@@ -1,48 +0,0 @@
"""
This script provides an example of how you can use OpenAI's python client with a Letta server.

Before running this example, make sure you start the OpenAI-compatible REST server with `letta server`.
"""
from openai import OpenAI


def main():
    # Point the OpenAI client at the local Letta server's OpenAI-compatible API
    client = OpenAI(base_url="http://localhost:8283/v1")

    # create assistant (creates a letta preset)
    assistant = client.beta.assistants.create(
        name="Math Tutor",
        instructions="You are a personal math tutor. Write and run code to answer math questions.",
        model="gpt-4-turbo-preview",
    )

    # create thread (creates a letta agent)
    thread = client.beta.threads.create()

    # create a message (appends a message to the letta agent)
    message = client.beta.threads.messages.create(
        thread_id=thread.id, role="user", content="I need to solve the equation `3x + 11 = 14`. Can you help me?"
    )

    # create a run (executes the agent on the messages)
    # NOTE: Letta does not support polling yet, so run status is always "completed"
    run = client.beta.threads.runs.create(
        thread_id=thread.id, assistant_id=assistant.id, instructions="Please address the user as Jane Doe. The user has a premium account."
    )
    run_id = run.id  # keep the run ID for reference (polling not supported yet)

    # Retrieve all messages from the thread
    messages = client.beta.threads.messages.list(thread_id=thread.id)

    # Print all messages from the thread
    for msg in messages.messages:
        role = msg["role"]
        content = msg["content"][0]
        print(f"{role.capitalize()}: {content}")


if __name__ == "__main__":
    main()

View File

@@ -13,7 +13,7 @@ class LLMConfig(BaseModel):
model_endpoint (str): The endpoint for the model.
model_wrapper (str): The wrapper for the model. This is used to wrap additional text around the input/output of the model. This is useful for text-to-text completions, such as the Completions API in OpenAI.
context_window (int): The context window size for the model.
put_inner_thoughts_in_kwargs (bool): Puts 'inner_thoughts' as a kwarg in the function call if this is set to True. This helps with function calling performance and also the generation of inner thoughts.
put_inner_thoughts_in_kwargs (bool): Puts `inner_thoughts` as a kwarg in the function call if this is set to True. This helps with function calling performance and also the generation of inner thoughts.
"""
# TODO: 🤮 don't default to a vendor! bug city!
@@ -67,6 +67,12 @@ class LLMConfig(BaseModel):
@classmethod
def default_config(cls, model_name: str):
"""
Convenience function to generate a default `LLMConfig` from a model name. Only some models are supported in this function.
Args:
model_name (str): The name of the model (gpt-4, gpt-4o-mini, letta).
"""
if model_name == "gpt-4":
return cls(
model="gpt-4",

View File

@@ -11,15 +11,25 @@ class BaseToolRule(LettaBase):
class ToolRule(BaseToolRule):
"""
A ToolRule represents a tool that can be invoked by the agent.
"""
type: str = Field("ToolRule")
children: List[str] = Field(..., description="The children tools that can be invoked.")
class InitToolRule(BaseToolRule):
"""
Represents the initial tool rule configuration.
"""
type: str = Field("InitToolRule")
"""Represents the initial tool rule configuration."""
class TerminalToolRule(BaseToolRule):
"""
Represents a terminal tool rule configuration where if this tool gets called, it must end the agent loop.
"""
type: str = Field("TerminalToolRule")
"""Represents a terminal tool rule configuration where if this tool gets called, it must end the agent loop."""