chore: officially migrate to submodule (#4502)

* remove apps/core and apps/fern

* fix precommit

* add submodule updates in workflows

* submodule

* remove core tests

* update core revision

* Add submodules: true to all GitHub workflows

- Ensure all workflows can access git submodules
- Add submodules support to deployment, test, and CI workflows
- Fix YAML syntax issues in workflow files

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* remove core-lint

* upgrade core with latest main of oss

---------

Co-authored-by: Claude <noreply@anthropic.com>
This commit is contained in:
Kian Jones
2025-09-09 12:45:53 -07:00
committed by GitHub
parent 48b5722095
commit 22f70ca07c
953 changed files with 0 additions and 181472 deletions

View File

@@ -1,421 +0,0 @@
from typing import List, Literal, Optional
from letta.agent import Agent
from letta.constants import CORE_MEMORY_LINE_NUMBER_WARNING
def send_message(self: "Agent", message: str) -> Optional[str]:
    """
    Sends a message to the human user.

    Args:
        message (str): Message contents. All unicode (including emojis) are supported.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    interface = self.interface
    # FIXME passing of msg_obj here is a hack, unclear if guaranteed to be the correct reference
    if interface:
        interface.assistant_message(message)  # , msg_obj=self._messages[-1])
    return None
def conversation_search(
    self: "Agent",
    query: str,
    roles: Optional[List[Literal["assistant", "user", "tool"]]] = None,
    limit: Optional[int] = None,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> Optional[str]:
    """
    Search prior conversation history using hybrid search (text + semantic similarity).

    Args:
        query (str): String to search for using both text matching and semantic similarity.
        roles (Optional[List[Literal["assistant", "user", "tool"]]]): Optional list of message roles to filter by.
        limit (Optional[int]): Maximum number of results to return. Uses system default if not specified.
        start_date (Optional[str]): Filter results to messages created on or after this date (INCLUSIVE). When using date-only format (e.g., "2024-01-15"), includes messages starting from 00:00:00 of that day. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-15" (from start of Jan 15), "2024-01-15T14:30" (from 2:30 PM on Jan 15).
        end_date (Optional[str]): Filter results to messages created on or before this date (INCLUSIVE). When using date-only format (e.g., "2024-01-20"), includes all messages from that entire day. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-20" (includes all of Jan 20), "2024-01-20T17:00" (up to 5 PM on Jan 20).

    Examples:
        # Search all messages
        conversation_search(query="project updates")

        # Search only assistant messages
        conversation_search(query="error handling", roles=["assistant"])

        # Search with date range (inclusive of both dates)
        conversation_search(query="meetings", start_date="2024-01-15", end_date="2024-01-20")
        # This includes all messages from Jan 15 00:00:00 through Jan 20 23:59:59

        # Search messages from a specific day (inclusive)
        conversation_search(query="bug reports", start_date="2024-09-04", end_date="2024-09-04")
        # This includes ALL messages from September 4, 2024

        # Search with specific time boundaries
        conversation_search(query="deployment", start_date="2024-01-15T09:00", end_date="2024-01-15T17:30")
        # This includes messages from 9 AM to 5:30 PM on Jan 15

        # Search with limit
        conversation_search(query="debugging", limit=10)

    Returns:
        str: Query result string containing matching messages with timestamps and content.
    """
    from letta.constants import RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
    from letta.helpers.json_helpers import json_dumps

    # Use provided limit or default
    if limit is None:
        limit = RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE

    # NOTE(review): start_date / end_date are accepted and documented above but are
    # never forwarded to list_messages_for_agent, so date filtering is currently a
    # no-op. Confirm whether the manager API accepts date bounds and wire them through.
    messages = self.message_manager.list_messages_for_agent(
        agent_id=self.agent_state.id,
        actor=self.user,
        query_text=query,
        roles=roles,
        limit=limit,
    )
    if len(messages) == 0:
        results_str = "No results found."
    else:
        results_pref = f"Found {len(messages)} results:"
        results_formatted = []
        for message in messages:
            # Extract text content from message
            # (assumes the first content part is text-bearing — TODO confirm for
            # non-text content types)
            text_content = message.content[0].text if message.content else ""
            result_entry = {"role": message.role, "content": text_content}
            results_formatted.append(result_entry)
        results_str = f"{results_pref} {json_dumps(results_formatted)}"
    return results_str
async def archival_memory_insert(self: "Agent", content: str, tags: Optional[list[str]] = None) -> Optional[str]:
    """
    Add to archival memory. Make sure to phrase the memory contents such that it can be easily queried later.

    Args:
        content (str): Content to write to the memory. All unicode (including emojis) are supported.
        tags (Optional[list[str]]): Optional list of tags to associate with this memory for better organization and filtering.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    # Stub: the server intercepts this tool call; reaching this body is a wiring bug.
    raise NotImplementedError("This should never be invoked directly. Contact Letta if you see this error message.")
async def archival_memory_search(
    self: "Agent",
    query: str,
    tags: Optional[list[str]] = None,
    tag_match_mode: Literal["any", "all"] = "any",
    top_k: Optional[int] = None,
    start_datetime: Optional[str] = None,
    end_datetime: Optional[str] = None,
) -> Optional[str]:
    """
    Search archival memory using semantic (embedding-based) search with optional temporal filtering.

    Args:
        query (str): String to search for using semantic similarity.
        tags (Optional[list[str]]): Optional list of tags to filter search results. Only passages with these tags will be returned.
        tag_match_mode (Literal["any", "all"]): How to match tags - "any" to match passages with any of the tags, "all" to match only passages with all tags. Defaults to "any".
        top_k (Optional[int]): Maximum number of results to return. Uses system default if not specified.
        start_datetime (Optional[str]): Filter results to passages created on or after this datetime (INCLUSIVE). When using date-only format (e.g., "2024-01-15"), includes passages starting from 00:00:00 of that day. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-15" (from start of Jan 15), "2024-01-15T14:30" (from 2:30 PM on Jan 15).
        end_datetime (Optional[str]): Filter results to passages created on or before this datetime (INCLUSIVE). When using date-only format (e.g., "2024-01-20"), includes all passages from that entire day. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-20" (includes all of Jan 20), "2024-01-20T17:00" (up to 5 PM on Jan 20).

    Examples:
        # Search all passages
        archival_memory_search(query="project updates")

        # Search with date range (inclusive of both dates)
        archival_memory_search(query="meetings", start_datetime="2024-01-15", end_datetime="2024-01-20")
        # This includes all passages from Jan 15 00:00:00 through Jan 20 23:59:59

        # Search passages from a specific day (inclusive)
        archival_memory_search(query="bug reports", start_datetime="2024-09-04", end_datetime="2024-09-04")
        # This includes ALL passages from September 4, 2024

        # Search with specific time range
        archival_memory_search(query="error logs", start_datetime="2024-01-15T09:30", end_datetime="2024-01-15T17:30")
        # This includes passages from 9:30 AM to 5:30 PM on Jan 15

        # Search from a specific point in time onwards
        archival_memory_search(query="customer feedback", start_datetime="2024-01-15T14:00")

    Returns:
        str: Query result string containing matching passages with timestamps and content.
    """
    # Stub: the server intercepts this tool call; reaching this body is a wiring bug.
    raise NotImplementedError("This should never be invoked directly. Contact Letta if you see this error message.")
def core_memory_append(agent_state: "AgentState", label: str, content: str) -> Optional[str]:  # type: ignore
    """
    Append to the contents of core memory.

    Args:
        label (str): Section of the memory to be edited.
        content (str): Content to write to the memory. All unicode (including emojis) are supported.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    current_value = str(agent_state.memory.get_block(label).value)
    # Join with a newline only when the block already has content; the previous
    # unconditional "\n" prepend left a spurious blank first line in empty blocks.
    new_value = f"{current_value}\n{content}" if current_value else str(content)
    agent_state.memory.update_block_value(label=label, value=new_value)
    return None
def core_memory_replace(agent_state: "AgentState", label: str, old_content: str, new_content: str) -> Optional[str]:  # type: ignore
    """
    Replace the contents of core memory. To delete memories, use an empty string for new_content.

    Args:
        label (str): Section of the memory to be edited.
        old_content (str): String to replace. Must be an exact match.
        new_content (str): Content to write to the memory. All unicode (including emojis) are supported.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    existing = str(agent_state.memory.get_block(label).value)
    # Refuse to silently no-op when the target text is absent.
    if old_content not in existing:
        raise ValueError(f"Old content '{old_content}' not found in memory block '{label}'")
    updated = existing.replace(str(old_content), str(new_content))
    agent_state.memory.update_block_value(label=label, value=updated)
    return None
def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_label: str) -> None:
    """
    Rewrite memory block for the main agent, new_memory should contain all current information from the block that is not outdated or inconsistent, integrating any new information, resulting in a new memory block that is organized, readable, and comprehensive.

    Args:
        new_memory (str): The new memory with information integrated from the memory block. If there is no new information, then this should be the same as the content in the source block.
        target_block_label (str): The name of the block to write to.

    Returns:
        None: None is always returned as this function does not produce a response.
    """
    memory = agent_state.memory
    # Create the block on first use, then overwrite its value wholesale.
    if memory.get_block(target_block_label) is None:
        memory.create_block(label=target_block_label, value=new_memory)
    memory.update_block_value(label=target_block_label, value=new_memory)
    return None
## Attempted v2 of sleep-time function set, meant to work better across all types

# Number of context lines shown around an edit when building result snippets.
SNIPPET_LINES: int = 4

# Based off of: https://github.com/anthropics/anthropic-quickstarts/blob/main/computer-use-demo/computer_use_demo/tools/edit.py?ref=musings.yasyf.com#L154
def memory_replace(agent_state: "AgentState", label: str, old_str: str, new_str: str) -> str:  # type: ignore
    """
    The memory_replace command allows you to replace a specific string in a memory block with a new string. This is used for making precise edits.

    Args:
        label (str): Section of the memory to be edited, identified by its label.
        old_str (str): The text to replace (must match exactly, including whitespace and indentation).
        new_str (str): The new text to insert in place of the old text. Do not include line number prefixes.

    Examples:
        # Update a block containing information about the user
        memory_replace(label="human", old_str="Their name is Alice", new_str="Their name is Bob")

        # Update a block containing a todo list
        memory_replace(label="todos", old_str="- [ ] Step 5: Search the web", new_str="- [x] Step 5: Search the web")

        # Pass an empty string as new_str to delete the old text
        memory_replace(label="human", old_str="Their name is Alice", new_str="")

        # Bad example - do NOT add (view-only) line numbers to the args
        memory_replace(label="human", old_str="Line 1: Their name is Alice", new_str="Line 1: Their name is Bob")

        # Bad example - do NOT include the line number warning either
        memory_replace(label="human", old_str="# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\\nLine 1: Their name is Alice", new_str="Line 1: Their name is Bob")

        # Good example - no line numbers or line number warning (they are view-only), just the text
        memory_replace(label="human", old_str="Their name is Alice", new_str="Their name is Bob")

    Returns:
        str: The success message
    """
    import re

    # Reject view-only line-number prefixes anywhere, including the FIRST line:
    # the previous pattern (r"\nLine \d+: ") required a preceding newline and so
    # missed exactly the single-line "Bad example" shown in the docstring.
    line_number_prefix = re.compile(r"^Line \d+: ", re.MULTILINE)
    if line_number_prefix.search(old_str):
        raise ValueError(
            "old_str contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)."
        )
    if CORE_MEMORY_LINE_NUMBER_WARNING in old_str:
        raise ValueError(
            "old_str contains a line number warning, which is not allowed. Do not include line number information when calling memory tools (line numbers are for display purposes only)."
        )
    if line_number_prefix.search(new_str):
        raise ValueError(
            "new_str contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)."
        )

    old_str = str(old_str).expandtabs()
    new_str = str(new_str).expandtabs()
    current_value = str(agent_state.memory.get_block(label).value).expandtabs()

    # old_str must appear exactly once so the edit is unambiguous.
    occurrences = current_value.count(old_str)
    if occurrences == 0:
        raise ValueError(f"No replacement was performed, old_str `{old_str}` did not appear verbatim in memory block with label `{label}`.")
    elif occurrences > 1:
        content_value_lines = current_value.split("\n")
        lines = [idx + 1 for idx, line in enumerate(content_value_lines) if old_str in line]
        raise ValueError(
            f"No replacement was performed. Multiple occurrences of old_str `{old_str}` in lines {lines}. Please ensure it is unique."
        )

    # Replace old_str with new_str and persist the result.
    new_value = current_value.replace(str(old_str), str(new_str))
    agent_state.memory.update_block_value(label=label, value=new_value)

    # Prepare the success message.
    success_msg = f"The core memory block with label `{label}` has been edited. "
    success_msg += "Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the memory block again if necessary."
    return success_msg
def memory_insert(agent_state: "AgentState", label: str, new_str: str, insert_line: int = -1) -> Optional[str]:  # type: ignore
    """
    The memory_insert command allows you to insert text at a specific location in a memory block.

    Args:
        label (str): Section of the memory to be edited, identified by its label.
        new_str (str): The text to insert. Do not include line number prefixes.
        insert_line (int): The line number after which to insert the text (0 for beginning of file). Defaults to -1 (end of the file).

    Examples:
        # Update a block containing information about the user (append to the end of the block)
        memory_insert(label="customer", new_str="The customer's ticket number is 12345")

        # Update a block containing information about the user (insert at the beginning of the block)
        memory_insert(label="customer", new_str="The customer's ticket number is 12345", insert_line=0)

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    import re

    # Reject view-only line-number prefixes anywhere in new_str, including its
    # first line (the previous pattern required a preceding newline and missed
    # a prefix at the very start of the string).
    if re.search(r"^Line \d+: ", new_str, flags=re.MULTILINE):
        raise ValueError(
            "new_str contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)."
        )
    if CORE_MEMORY_LINE_NUMBER_WARNING in new_str:
        raise ValueError(
            "new_str contains a line number warning, which is not allowed. Do not include line number information when calling memory tools (line numbers are for display purposes only)."
        )

    current_value = str(agent_state.memory.get_block(label).value).expandtabs()
    new_str = str(new_str).expandtabs()
    current_value_lines = current_value.split("\n")
    n_lines = len(current_value_lines)

    # Valid positions run from 0 (before the first line) through n_lines (after
    # the last line); -1 is shorthand for appending at the end.
    if insert_line == -1:
        insert_line = n_lines
    elif insert_line < 0 or insert_line > n_lines:
        raise ValueError(
            f"Invalid `insert_line` parameter: {insert_line}. It should be within the range of lines of the memory block: {[0, n_lines]}, or -1 to append to the end of the memory block."
        )

    # Splice the new lines in at the requested position and persist the result.
    # (The previous version also built an unused `snippet_lines` list — removed.)
    new_str_lines = new_str.split("\n")
    new_value = "\n".join(current_value_lines[:insert_line] + new_str_lines + current_value_lines[insert_line:])
    agent_state.memory.update_block_value(label=label, value=new_value)

    # Prepare the success message.
    success_msg = f"The core memory block with label `{label}` has been edited. "
    success_msg += "Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the memory block again if necessary."
    return success_msg
def memory_rethink(agent_state: "AgentState", label: str, new_memory: str) -> str:
    """
    The memory_rethink command allows you to completely rewrite the contents of a memory block. Use this tool to make large sweeping changes (e.g. when you want to condense or reorganize the memory blocks), do NOT use this tool to make small precise edits (e.g. add or remove a line, replace a specific string, etc).

    Args:
        label (str): The memory block to be rewritten, identified by its label.
        new_memory (str): The new memory contents with information integrated from existing memory blocks and the conversation context.

    Returns:
        str: The success message
    """
    import re

    # Reject view-only line-number prefixes anywhere in new_memory, including
    # its first line (the previous pattern required a preceding newline and
    # missed a prefix at the very start of the string).
    if re.search(r"^Line \d+: ", new_memory, flags=re.MULTILINE):
        raise ValueError(
            "new_memory contains a line number prefix, which is not allowed. Do not include line numbers when calling memory tools (line numbers are for display purposes only)."
        )
    if CORE_MEMORY_LINE_NUMBER_WARNING in new_memory:
        raise ValueError(
            "new_memory contains a line number warning, which is not allowed. Do not include line number information when calling memory tools (line numbers are for display purposes only)."
        )

    # Create the block on first use, then overwrite its value wholesale.
    if agent_state.memory.get_block(label) is None:
        agent_state.memory.create_block(label=label, value=new_memory)
    agent_state.memory.update_block_value(label=label, value=new_memory)

    # The function has always returned this message; the annotation/docstring
    # previously claimed `None`, which was wrong (mirrors memory_replace /
    # memory_insert).
    success_msg = f"The core memory block with label `{label}` has been edited. "
    success_msg += "Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the memory block again if necessary."
    return success_msg
def memory_finish_edits(agent_state: "AgentState") -> None:  # type: ignore
    """
    Call the memory_finish_edits command when you are finished making edits (integrating all new information) into the memory blocks. This function is called when the agent is done rethinking the memory.

    Returns:
        None: None is always returned as this function does not produce a response.
    """
    # Pure signal tool: invoking it is the side effect; there is nothing to compute.
    # (Docstring previously claimed Optional[str], contradicting the -> None annotation.)
    return None

View File

@@ -1,66 +0,0 @@
from typing import List, Literal, Optional
def run_code(code: str, language: Literal["python", "js", "ts", "r", "java"]) -> str:
    """
    Run code in a sandbox. Supports Python, Javascript, Typescript, R, and Java.

    Args:
        code (str): The code to run.
        language (Literal["python", "js", "ts", "r", "java"]): The language of the code.

    Returns:
        str: The output of the code, the stdout, the stderr, and error traces (if any).
    """
    # Stub: sandboxed execution is only wired up in the hosted agent architecture.
    raise NotImplementedError("This is only available on the latest agent architecture. Please contact the Letta team.")
async def web_search(
    query: str,
    num_results: int = 10,
    category: Optional[
        Literal["company", "research paper", "news", "pdf", "github", "tweet", "personal site", "linkedin profile", "financial report"]
    ] = None,
    include_text: bool = False,
    include_domains: Optional[List[str]] = None,
    exclude_domains: Optional[List[str]] = None,
    start_published_date: Optional[str] = None,
    end_published_date: Optional[str] = None,
    user_location: Optional[str] = None,
) -> str:
    """
    Search the web using Exa's AI-powered search engine and retrieve relevant content.

    Examples:
        web_search("Tesla Q1 2025 earnings report", num_results=5, category="financial report")
        web_search("Latest research in large language models", category="research paper", include_domains=["arxiv.org", "paperswithcode.com"])
        web_search("Letta API documentation core_memory_append", num_results=3)

    Args:
        query (str): The search query to find relevant web content.
        num_results (int, optional): Number of results to return (1-100). Defaults to 10.
        category (Optional[Literal], optional): Focus search on specific content types. Defaults to None.
        include_text (bool, optional): Whether to retrieve full page content. Defaults to False (only returns summary and highlights, since the full text usually will overflow the context window).
        include_domains (Optional[List[str]], optional): List of domains to include in search results. Defaults to None.
        exclude_domains (Optional[List[str]], optional): List of domains to exclude from search results. Defaults to None.
        start_published_date (Optional[str], optional): Only return content published after this date (ISO format). Defaults to None.
        end_published_date (Optional[str], optional): Only return content published before this date (ISO format). Defaults to None.
        user_location (Optional[str], optional): Two-letter country code for localized results (e.g., "US"). Defaults to None.

    Returns:
        str: A JSON-encoded string containing search results with title, URL, content, highlights, and summary.
    """
    # Stub: the hosted agent architecture services this tool call.
    raise NotImplementedError("This is only available on the latest agent architecture. Please contact the Letta team.")
async def fetch_webpage(url: str) -> str:
    """
    Fetch a webpage and convert it to markdown/text format using Jina AI reader.

    Args:
        url: The URL of the webpage to fetch and convert

    Returns:
        String containing the webpage content in markdown/text format
    """
    # Stub: the hosted agent architecture services this tool call.
    raise NotImplementedError("This is only available on the latest agent architecture. Please contact the Letta team.")

View File

@@ -1,135 +0,0 @@
import os
import uuid
from typing import Optional
import requests
from letta.constants import MESSAGE_CHATGPT_FUNCTION_MODEL, MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE
from letta.helpers.json_helpers import json_dumps, json_loads
from letta.llm_api.llm_api_tools import create
from letta.schemas.letta_message_content import TextContent
from letta.schemas.message import Message
def message_chatgpt(self, message: str):
    """
    Send a message to a more basic AI, ChatGPT. A useful resource for asking questions. ChatGPT does not retain memory of previous interactions.

    Args:
        message (str): Message to send ChatGPT. Phrase your message as a full English sentence.

    Returns:
        str: Reply message from ChatGPT
    """
    # Stateless call: build throwaway identities for the message objects.
    # NOTE(review): uuid.uuid4() returns uuid.UUID objects, not strings —
    # confirm Message accepts UUID ids for user_id/agent_id.
    dummy_user_id = uuid.uuid4()
    dummy_agent_id = uuid.uuid4()
    # Two-message prompt: fixed system preamble + the user's question.
    message_sequence = [
        Message(
            user_id=dummy_user_id,
            agent_id=dummy_agent_id,
            role="system",
            content=[TextContent(text=MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE)],
        ),
        Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role="user", content=[TextContent(text=str(message))]),
    ]
    # TODO: this will error without an LLMConfig
    response = create(
        model=MESSAGE_CHATGPT_FUNCTION_MODEL,
        messages=message_sequence,
    )
    # Assumes a ChatCompletion-style response — the first choice carries the reply.
    reply = response.choices[0].message.content
    return reply
def read_from_text_file(self, filename: str, line_start: int, num_lines: Optional[int] = 1):
    """
    Read lines from a text file.

    Args:
        filename (str): The name of the file to read.
        line_start (int): Line to start reading from (1-indexed).
        num_lines (Optional[int]): How many lines to read (defaults to 1).

    Returns:
        str: Text read from the file

    Raises:
        FileNotFoundError: If the file does not exist.
        ValueError: If line_start or num_lines is not a positive integer.
    """
    max_chars = 500  # cap on total characters returned
    trunc_message = True  # append an alert line when output is truncated

    if not os.path.exists(filename):
        # Fixed: this was an f-string with no placeholder, so the filename
        # never appeared in the error message.
        raise FileNotFoundError(f"The file '{filename}' does not exist.")
    if num_lines is None:
        # An explicit None previously crashed on the `num_lines < 1` comparison;
        # treat it as the documented default of one line.
        num_lines = 1
    if line_start < 1 or num_lines < 1:
        raise ValueError("Both line_start and num_lines must be positive integers.")

    lines = []
    chars_read = 0
    with open(filename, "r", encoding="utf-8") as file:
        for current_line_number, line in enumerate(file, start=1):
            if line_start <= current_line_number < line_start + num_lines:
                chars_to_add = len(line)
                if max_chars is not None and chars_read + chars_to_add > max_chars:
                    # Adding this line would exceed max_chars: truncate it and stop.
                    excess_chars = (chars_read + chars_to_add) - max_chars
                    lines.append(line[:-excess_chars].rstrip("\n"))
                    if trunc_message:
                        lines.append(f"[SYSTEM ALERT - max chars ({max_chars}) reached during file read]")
                    break
                else:
                    lines.append(line.rstrip("\n"))
                    chars_read += chars_to_add
            if current_line_number >= line_start + num_lines - 1:
                # Past the requested window; no need to scan the rest of the file.
                break
    return "\n".join(lines)
def append_to_text_file(self, filename: str, content: str):
    """
    Append to a text file.

    Args:
        filename (str): The name of the file to append to.
        content (str): Content to append to the file. A trailing newline is added.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.

    Raises:
        FileNotFoundError: If the file does not exist.
    """
    if not os.path.exists(filename):
        # Fixed: this was an f-string with no placeholder, so the filename
        # never appeared in the error message.
        raise FileNotFoundError(f"The file '{filename}' does not exist.")
    with open(filename, "a", encoding="utf-8") as file:
        file.write(content + "\n")
def http_request(self, method: str, url: str, payload_json: Optional[str] = None):
    """
    Generates an HTTP request and returns the response.

    Args:
        method (str): The HTTP method (e.g., 'GET', 'POST').
        url (str): The URL for the request.
        payload_json (Optional[str]): A JSON string representing the request payload.

    Returns:
        dict: The response from the HTTP request, or {"error": ...} on failure.
    """
    try:
        headers = {"Content-Type": "application/json"}
        # GET requests carry no body, so any supplied payload is dropped.
        if method.upper() == "GET":
            print(f"[HTTP] launching GET request to {url}")
            response = requests.get(url, headers=headers)
        else:
            # Decode the JSON payload (empty dict when none was supplied).
            payload = json_loads(payload_json) if payload_json else {}
            print(f"[HTTP] launching {method} request to {url}, payload=\n{json_dumps(payload, indent=2)}")
            response = requests.request(method, url, json=payload, headers=headers)
        return {"status_code": response.status_code, "headers": dict(response.headers), "body": response.text}
    except Exception as e:
        # Best-effort tool: surface the failure as data instead of raising.
        return {"error": str(e)}

View File

@@ -1,97 +0,0 @@
from typing import TYPE_CHECKING, List, Optional
from letta.functions.types import FileOpenRequest
if TYPE_CHECKING:
from letta.schemas.agent import AgentState
from letta.schemas.file import FileMetadata
async def open_files(agent_state: "AgentState", file_requests: List[FileOpenRequest], close_all_others: bool = False) -> str:
    """Open one or more files and load their contents into files section in core memory. Maximum of 5 files can be opened simultaneously.

    Use this when you want to:
    - Inspect or reference file contents during reasoning
    - View specific portions of large files (e.g. functions or definitions)
    - Replace currently open files with a new set for focused context (via `close_all_others=True`)

    Examples:
        Open single file belonging to a directory named `project_utils` (entire content):
            file_requests = [FileOpenRequest(file_name="project_utils/config.py")]

        Open multiple files with different view ranges:
            file_requests = [
                FileOpenRequest(file_name="project_utils/config.py", offset=0, length=50),  # Lines 1-50
                FileOpenRequest(file_name="project_utils/main.py", offset=100, length=100),  # Lines 101-200
                FileOpenRequest(file_name="project_utils/utils.py")  # Entire file
            ]

        Close all other files and open new ones:
            open_files(agent_state, file_requests, close_all_others=True)

    Args:
        file_requests (List[FileOpenRequest]): List of file open requests, each specifying file name and optional view range.
        close_all_others (bool): If True, closes all other currently open files first. Defaults to False.

    Returns:
        str: A status message
    """
    # Stub: the server-side runtime intercepts this tool call and performs the file I/O.
    raise NotImplementedError("Tool not implemented. Please contact the Letta team.")
async def grep_files(
    agent_state: "AgentState",
    pattern: str,
    include: Optional[str] = None,
    context_lines: Optional[int] = 1,
    offset: Optional[int] = None,
) -> str:
    """
    Searches file contents for pattern matches with surrounding context.

    Results are paginated - shows 20 matches per call. The response includes:
    - A summary of total matches and which files contain them
    - The current page of matches (20 at a time)
    - Instructions for viewing more matches using the offset parameter

    Example usage:
        First call: grep_files(pattern="TODO")
        Next call: grep_files(pattern="TODO", offset=20)  # Shows matches 21-40

    Returns search results containing:
    - Summary with total match count and file distribution
    - List of files with match counts per file
    - Current page of matches (up to 20)
    - Navigation hint for next page if more matches exist

    Args:
        pattern (str): Keyword or regex pattern to search within file contents.
        include (Optional[str]): Optional keyword or regex pattern to filter filenames to include in the search.
        context_lines (Optional[int]): Number of lines of context to show before and after each match.
            Equivalent to `-C` in grep_files. Defaults to 1.
        offset (Optional[int]): Number of matches to skip before showing results. Used for pagination.
            For example, offset=20 shows matches starting from the 21st match.
            Use offset=0 (or omit) for first page, offset=20 for second page,
            offset=40 for third page, etc. The tool will tell you the exact
            offset to use for the next page.
    """
    # Stub: the server-side runtime intercepts this tool call and performs the search.
    raise NotImplementedError("Tool not implemented. Please contact the Letta team.")
async def semantic_search_files(agent_state: "AgentState", query: str, limit: int = 5) -> List["FileMetadata"]:
    """
    Searches file contents using semantic meaning rather than exact matches.

    Ideal for:
    - Finding conceptually related information across files
    - Discovering relevant content without knowing exact keywords
    - Locating files with similar topics or themes

    Args:
        query (str): The search query text to find semantically similar content.
        limit: Maximum number of results to return (default: 5)

    Returns:
        List[FileMetadata]: List of matching files.
    """
    # Stub: the server-side runtime intercepts this tool call and performs the search.
    raise NotImplementedError("Tool not implemented. Please contact the Letta team.")

View File

@@ -1,160 +0,0 @@
import asyncio
import json
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import TYPE_CHECKING, List
from letta.functions.helpers import (
_send_message_to_all_agents_in_group_async,
execute_send_message_to_agent,
extract_send_message_from_steps_messages,
fire_and_forget_send_to_agent,
)
from letta.schemas.enums import MessageRole
from letta.schemas.message import MessageCreate
from letta.server.rest_api.utils import get_letta_server
from letta.settings import settings
if TYPE_CHECKING:
from letta.agent import Agent
def send_message_to_agent_and_wait_for_reply(self: "Agent", message: str, other_agent_id: str) -> str:
    """
    Sends a message to a specific Letta agent within the same organization and waits for a response. The sender's identity is automatically included, so no explicit introduction is needed in the message. This function is designed for two-way communication where a reply is expected.

    Args:
        message (str): The content of the message to be sent to the target agent.
        other_agent_id (str): The unique identifier of the target Letta agent.

    Returns:
        str: The response from the target agent.
    """
    # Prefix the outgoing text so the receiver knows who sent it and how to
    # route its reply back to us.
    prefixed = (
        f"[Incoming message from agent with ID '{self.agent_state.id}' - to reply to this message, "
        f"make sure to use the 'send_message' at the end, and the system will notify the sender of your response] "
        f"{message}"
    )
    outgoing = [MessageCreate(role=MessageRole.system, content=prefixed, name=self.agent_state.name)]
    return execute_send_message_to_agent(
        sender_agent=self,
        messages=outgoing,
        other_agent_id=other_agent_id,
        log_prefix="[send_message_to_agent_and_wait_for_reply]",
    )
def send_message_to_agents_matching_tags(self: "Agent", message: str, match_all: List[str], match_some: List[str]) -> List[str]:
    """
    Sends a message to all agents within the same organization that match the specified tag criteria. Agents must possess *all* of the tags in `match_all` and *at least one* of the tags in `match_some` to receive the message.

    Args:
        message (str): The content of the message to be sent to each matching agent.
        match_all (List[str]): A list of tags that an agent must possess to receive the message.
        match_some (List[str]): A list of tags where an agent must have at least one to qualify.

    Returns:
        List[str]: A list of responses from the agents that matched the filtering criteria. Each
            response corresponds to a single agent. Agents that do not respond will not have an entry
            in the returned list.
    """
    server = get_letta_server()

    # Prefix so recipients know the message came from another agent and how to respond.
    augmented_message = (
        f"[Incoming message from external Letta agent - to reply to this message, "
        f"make sure to use the 'send_message' at the end, and the system will notify the sender of your response] "
        f"{message}"
    )

    # Resolve the recipient set up front; nothing to do when no agent matches.
    targets = server.agent_manager.list_agents_matching_tags(actor=self.user, match_all=match_all, match_some=match_some)
    if not targets:
        return []

    def _deliver(agent_id: str) -> str:
        """Load one recipient agent, step it with the message, and serialize its reply as JSON."""
        recipient = server.load_agent(agent_id=agent_id, interface=None, actor=self.user)
        step_input = [MessageCreate(role=MessageRole.system, content=augmented_message, name=self.agent_state.name)]
        usage_stats = recipient.step(
            input_messages=step_input,
            chaining=True,
            max_chaining_steps=None,
            stream=False,
            skip_verify=True,
            metadata=None,
            put_inner_thoughts_first=True,
        )
        replies = extract_send_message_from_steps_messages(usage_stats.steps_messages, logger=recipient.logger)
        return json.dumps(
            {
                "agent_id": agent_id,
                "response_messages": replies if replies else ["<no response>"],
            },
            indent=2,
        )

    # Fan out across a bounded thread pool; collect replies as each agent finishes.
    responses: List[str] = []
    with ThreadPoolExecutor(max_workers=settings.multi_agent_concurrent_sends) as pool:
        pending = {pool.submit(_deliver, state.id): state for state in targets}
        for done in as_completed(pending):
            try:
                responses.append(done.result())
            except Exception as e:
                # A failure for one recipient must not abort delivery to the others.
                self.logger.exception(f"Error processing agent {pending[done]}: {e}")
    return responses
def send_message_to_all_agents_in_group(self: "Agent", message: str) -> List[str]:
    """
    Sends a message to all agents within the same multi-agent group.

    Args:
        message (str): The content of the message to be sent to each matching agent.

    Returns:
        List[str]: A list of responses from the agents that matched the filtering criteria. Each
            response corresponds to a single agent. Agents that do not respond will not have an entry
            in the returned list.
    """
    # Drive the async group broadcast to completion on a fresh event loop.
    broadcast = _send_message_to_all_agents_in_group_async(self, message)
    return asyncio.run(broadcast)
def send_message_to_agent_async(self: "Agent", message: str, other_agent_id: str) -> str:
    """
    Sends a message to a specific Letta agent within the same organization. The sender's identity is automatically included, so no explicit introduction is required in the message. This function does not expect a response from the target agent, making it suitable for notifications or one-way communication.

    Args:
        message (str): The content of the message to be sent to the target agent.
        other_agent_id (str): The unique identifier of the target Letta agent.

    Returns:
        str: A confirmation message indicating the message was successfully sent.
    """
    # This tool is explicitly disabled in the hosted environment.
    if settings.environment == "PRODUCTION":
        raise RuntimeError("This tool is not allowed to be run on Letta Cloud.")

    # Tag the payload with the sender's identity and reply instructions.
    prefixed = (
        f"[Incoming message from agent with ID '{self.agent_state.id}' - to reply to this message, "
        f"make sure to use the 'send_message_to_agent_async' tool, or the agent will not receive your message] "
        f"{message}"
    )
    outgoing = [MessageCreate(role=MessageRole.system, content=prefixed, name=self.agent_state.name)]

    # Fire-and-forget delivery: hand off to the background sender without waiting.
    fire_and_forget_send_to_agent(
        sender_agent=self,
        messages=outgoing,
        other_agent_id=other_agent_id,
        log_prefix="[send_message_to_agent_async]",
        use_retries=False,  # set True to route through _async_send_message_with_retries
    )

    # Immediately confirm to the caller; delivery continues in the background.
    return "Successfully sent message"

View File

@@ -1,80 +0,0 @@
## Voice chat + sleeptime tools
from typing import List, Optional
from pydantic import BaseModel, Field
def rethink_user_memory(agent_state: "AgentState", new_memory: str) -> None:
    """
    Rewrite memory block for the main agent, new_memory should contain all current information from the block that is not outdated or inconsistent, integrating any new information, resulting in a new memory block that is organized, readable, and comprehensive.

    Args:
        new_memory (str): The new memory with information integrated from the memory block. If there is no new information, then this should be the same as the content in the source block.

    Returns:
        None: None is always returned as this function does not produce a response.
    """
    # Intentionally a no-op stub: the real rewrite is performed inside the agent loop,
    # which intercepts this tool call and applies `new_memory` to the block itself.
    return None
def finish_rethinking_memory(agent_state: "AgentState") -> None:  # type: ignore
    """
    This function is called when the agent is done rethinking the memory.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    # No-op sentinel: calling this tool simply signals the agent loop to exit
    # the rethinking phase; there is nothing to compute here.
    return None
class MemoryChunk(BaseModel):
    """A contiguous span of evicted conversation lines plus a short synopsis.

    Used by `store_memories` to describe which lines fell out of the context
    window and to carry a forward-looking summary that is embedded for later
    semantic retrieval.
    """

    # Zero-based line index where the evicted span begins.
    start_index: int = Field(
        ...,
        description="Zero-based index of the first evicted line in this chunk.",
    )
    # Zero-based line index where the evicted span ends (inclusive).
    end_index: int = Field(
        ...,
        description="Zero-based index of the last evicted line (inclusive).",
    )
    # Summary written for future retrieval, not a verbatim transcript.
    context: str = Field(
        ...,
        description="1-3 sentence paraphrase capturing key facts/details, user preferences, or goals that this chunk reveals—written for future retrieval.",
    )
def store_memories(agent_state: "AgentState", chunks: List[MemoryChunk]) -> None:
    """
    Persist dialogue that is about to fall out of the agent's context window.

    Args:
        chunks (List[MemoryChunk]):
            Each chunk pinpoints a contiguous block of **evicted** lines and provides a short, forward-looking synopsis (`context`) that will be embedded for future semantic lookup.

    Returns:
        None
    """
    # Intentionally a no-op stub: the agent loop intercepts this tool call and
    # performs the actual persistence of `chunks` itself.
    return None
def search_memory(
    agent_state: "AgentState",
    convo_keyword_queries: Optional[List[str]],
    start_minutes_ago: Optional[int],
    end_minutes_ago: Optional[int],
) -> Optional[str]:
    """
    Look in long-term or earlier-conversation memory only when the user asks about something missing from the visible context. The user's latest utterance is sent automatically as the main query.

    Args:
        convo_keyword_queries (Optional[List[str]]): Extra keywords (e.g., order ID, place name). Use *null* if not appropriate for the latest user message.
        start_minutes_ago (Optional[int]): Newer bound of the time window for results, specified in minutes ago. Use *null* if no lower time bound is needed.
        end_minutes_ago (Optional[int]): Older bound of the time window, in minutes ago. Use *null* if no upper bound is needed.

    Returns:
        Optional[str]: A formatted string of matching memory entries, or None if no
            relevant memories are found.
    """
    # Intentionally a no-op stub: the agent loop intercepts this tool call and
    # runs the actual memory search with these arguments.
    return None