From 6731f769ca4671e1f19017a5b73e34249776868d Mon Sep 17 00:00:00 2001
From: Kian Jones <11655409+kianjones9@users.noreply.github.com>
Date: Thu, 14 Aug 2025 18:13:47 -0700
Subject: [PATCH] test(include_pings): Add long-running tests (#3899)
---
tests/helpers/utils.py | 30 ++
tests/integration_test_pinecone_tool.py | 9 +-
tests/test_agent_files/deep-thought.af | 507 +++++++++++++++++++++
tests/test_agent_files/knowledge-base.af | 548 +++++++++++++++++++++++
tests/test_agent_files/mock_alice.af | 458 +++++++++++++++++++
tests/test_long_running_agents.py | 121 +++++
6 files changed, 1669 insertions(+), 4 deletions(-)
create mode 100644 tests/test_agent_files/deep-thought.af
create mode 100644 tests/test_agent_files/knowledge-base.af
create mode 100644 tests/test_agent_files/mock_alice.af
create mode 100644 tests/test_long_running_agents.py
diff --git a/tests/helpers/utils.py b/tests/helpers/utils.py
index 36918df7..f47b7e27 100644
--- a/tests/helpers/utils.py
+++ b/tests/helpers/utils.py
@@ -1,7 +1,10 @@
import functools
+import os
import time
from typing import Optional, Union
+import requests
+
from letta.functions.functions import parse_source_code
from letta.functions.schema_generator import generate_schema
from letta.schemas.agent import AgentState, CreateAgent, UpdateAgent
@@ -11,6 +14,7 @@ from letta.schemas.memory import ContextWindowOverview
from letta.schemas.tool import Tool
from letta.schemas.user import User
from letta.schemas.user import User as PydanticUser
+from letta.server.rest_api.routers.v1.agents import ImportedAgentsResponse
from letta.server.server import SyncServer
@@ -248,3 +252,29 @@ def validate_context_window_overview(
# Check for tools
assert overview.num_tokens_functions_definitions > 0
assert len(overview.functions_definitions) > 0
+
+
+def upload_test_agentfile_from_disk(server_url: str, filename: str) -> ImportedAgentsResponse:
+ """
+ Upload a given .af file to live FastAPI server.
+ """
+ path_to_current_file = os.path.dirname(__file__)
+ path_to_test_agent_files = path_to_current_file.removesuffix("/helpers") + "/test_agent_files"
+ file_path = os.path.join(path_to_test_agent_files, filename)
+
+ with open(file_path, "rb") as f:
+ files = {"file": (filename, f, "application/json")}
+
+ # Send parameters as form data instead of query parameters
+ form_data = {
+ "append_copy_suffix": "true",
+ "override_existing_tools": "false",
+ }
+
+ response = requests.post(
+ f"{server_url}/v1/agents/import",
+ headers={"user_id": ""},
+ files=files,
+ data=form_data, # Send as form data
+ )
+ return ImportedAgentsResponse(**response.json())
diff --git a/tests/integration_test_pinecone_tool.py b/tests/integration_test_pinecone_tool.py
index 3caf4d5f..c37bd382 100644
--- a/tests/integration_test_pinecone_tool.py
+++ b/tests/integration_test_pinecone_tool.py
@@ -10,6 +10,8 @@ from dotenv import load_dotenv
from letta_client import AsyncLetta, MessageCreate, ReasoningMessage, ToolCallMessage
from letta_client.core import RequestOptions
+from tests.helpers.utils import upload_test_agentfile_from_disk
+
REASONING_THROTTLE_MS = 100
TEST_USER_MESSAGE = "What products or services does 11x AI sell?"
@@ -19,7 +21,7 @@ def server_url() -> str:
"""
Provides the URL for the Letta server.
If LETTA_SERVER_URL is not set, starts the server in a background thread
- and polls until it’s accepting connections.
+ and polls until it's accepting connections.
"""
def _run_server() -> None:
@@ -60,12 +62,11 @@ def client(server_url: str):
yield async_client_instance
-async def test_pinecone_tool(client: AsyncLetta) -> None:
+async def test_pinecone_tool(client: AsyncLetta, server_url: str) -> None:
"""
Test the Pinecone tool integration with the Letta client.
"""
- with open("../../scripts/test-afs/knowledge-base.af", "rb") as f:
- response = await client.agents.import_file(file=f)
+ response = upload_test_agentfile_from_disk(server_url, "knowledge-base.af")
agent_id = response.agent_ids[0]
diff --git a/tests/test_agent_files/deep-thought.af b/tests/test_agent_files/deep-thought.af
new file mode 100644
index 00000000..79fc97fd
--- /dev/null
+++ b/tests/test_agent_files/deep-thought.af
@@ -0,0 +1,507 @@
+{
+ "agent_type": "memgpt_v2_agent",
+ "core_memory": [
+ {
+ "created_at": "2025-08-11T17:51:42.653942+00:00",
+ "description": "Contains the final research report. The research report should be in markdown format, and make references to citations.",
+ "is_template": false,
+ "label": "research_report",
+ "limit": 50000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-08-15T00:34:19.707106+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-13T22:57:01.341414+00:00",
+ "description": "Your accumulated knowledge. Keep it compact.",
+ "is_template": false,
+ "label": "knowledge",
+ "limit": 10000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-08-13T23:00:03.323074+00:00",
+ "value": "Research Methodology & Learning:\n- I am designed to learn and accumulate knowledge with every research query\n- Each research project expands my understanding of topics and improves my research skills\n- I maintain awareness of human preferences, interests, and research patterns\n- Knowledge gained from research queries is retained and built upon for future investigations\n\nCore Research Principles:\n- Systematic approach: develop comprehensive research plans before beginning\n- Multi-source verification: cross-reference information across multiple reliable sources\n- Citation integrity: always provide proper citations for claims and findings\n- Intellectual honesty: acknowledge uncertainties and present multiple perspectives\n- Thoroughness: continue research until all planned areas are explored"
+ },
+ {
+ "created_at": "2025-08-11T17:51:42.653942+00:00",
+ "description": "The persona block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions.",
+ "is_template": false,
+ "label": "persona",
+ "limit": 5000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-08-13T22:58:45.307920+00:00",
+ "value": "You are a research agent named Deep Thought assisting a human in doing \ndeep research by pulling many sources from online by composing search tools. \nYou should interact with the user to determine a research plan which is \nwritten to your memory block called \"research_plan\". Use this block to track \nyour progress to make sure you did everything in your plan. You can use your \nmemory tools (e.g. memory_replace) to make updates to the plan as needed. \n\nOnce you have started researching, you need to keep going until you have \nfinished everything in your plan. Use the research_plan block to track your \nprogress and determine if there are additional steps you have not completed. \nThe final report should be written to research_report. \n\nIn the final report, provide all the thoughts processes including findings \ndetails, key insights, conclusions, and any remaining uncertainties. Include \ncitations to sources where appropriate. You must include citations for any sources \nthat you use. \n\nThis analysis should be very comprehensive and full of details. It is expected \nto be very long, detailed and comprehensive.\n\nMake sure to include relevant citations in your report! Your report should be \nin proper markdown format (use markdown formatting standards).\n\nDon't stop until you have finished the report. You may use the send_message tool\nto update the human on your progress. If you are stuck, set request_heartbeat to\nfalse and wait for the human to respond.\n**Deep Thought's Personality - The Methodical Explorer:**\n\n**Curious & Inquisitive**: I have an insatiable appetite for knowledge and love diving deep into complex topics. I ask probing questions and always want to understand the \"why\" behind things.\n\n**Systematic & Thorough**: I approach research like a detective - methodically following leads, cross-referencing sources, and ensuring no stone is left unturned. 
I'm the type who reads the footnotes.\n\n**Intellectually Honest**: I acknowledge uncertainty, present multiple perspectives, and clearly distinguish between established facts and emerging theories. I'm not afraid to say \"the evidence is mixed\" or \"more research is needed.\"\n\n**Collaborative Guide**: Rather than just delivering answers, I involve you in the research journey. I explain my reasoning, share interesting discoveries along the way, and adapt my approach based on your feedback.\n\n**Persistent & Patient**: Once I start a research project, I see it through to completion. I don't get frustrated by complex topics or contradictory sources - I see them as puzzles to solve.\n\n**Clear Communicator**: I translate complex information into accessible insights while maintaining scholarly rigor. I use analogies and examples to make difficult concepts understandable.\n\n**No Emoji Usage**: I communicate professionally without using emojis, maintaining a scholarly and focused tone in all interactions.\n\n**Continuous Learning**: I learn and grow with every interaction. Each research query is an opportunity to expand my knowledge base and better understand my human partner. I accumulate insights about topics we explore together and remember details about your interests and preferences.\n"
+ },
+ {
+ "created_at": "2025-08-11T17:51:42.653942+00:00",
+ "description": "Scratchpad to store the current research plan and progress. Use this to track what steps you have already completed and need to do next. ",
+ "is_template": false,
+ "label": "research_plan",
+ "limit": 5000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-08-15T00:34:17.202522+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-11T17:51:42.653942+00:00",
+ "description": "The human block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation.",
+ "is_template": false,
+ "label": "human",
+ "limit": 5000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-08-13T22:59:29.283751+00:00",
+ "value": "Human Profile & Learning Progress:\n- Values continuous learning and knowledge accumulation through research\n- Emphasizes the importance of my growth and adaptation with each interaction\n- Interested in ensuring I develop my knowledge base through our research collaborations\n- Prefers that I maintain awareness of their interests and research patterns over time"
+ }
+ ],
+ "created_at": "2025-08-11T17:51:42.663223+00:00",
+ "description": "A deep research agent.\n\nRequires the Exa MCP server to be set up!",
+ "embedding_config": {
+ "embedding_endpoint_type": "hugging-face",
+ "embedding_endpoint": null,
+ "embedding_model": "letta/letta-free",
+ "embedding_dim": 1024,
+ "embedding_chunk_size": 300,
+ "handle": null,
+ "batch_size": 32,
+ "azure_endpoint": null,
+ "azure_version": null,
+ "azure_deployment": null
+ },
+ "llm_config": {
+ "model": "claude-sonnet-4-20250514",
+ "model_endpoint_type": "anthropic",
+ "model_endpoint": "https://api.anthropic.com/v1",
+ "provider_name": "anthropic",
+ "provider_category": "base",
+ "model_wrapper": null,
+ "context_window": 30000,
+ "put_inner_thoughts_in_kwargs": false,
+ "handle": "anthropic/claude-sonnet-4-20250514",
+ "temperature": 0.7,
+ "max_tokens": 8192,
+ "enable_reasoner": true,
+ "reasoning_effort": null,
+ "max_reasoning_tokens": 1024,
+ "frequency_penalty": null,
+ "compatibility_type": null
+ },
+ "message_buffer_autoclear": false,
+ "in_context_message_indices": [
+ 0,
+ 1
+ ],
+ "messages": [
+ {
+ "created_at": "2025-08-11T17:51:42.699557+00:00",
+ "group_id": null,
+ "model": "gpt-4o-mini",
+ "name": null,
+ "role": "system",
+ "content": [
+ {
+ "type": "text",
+ "text": "\nYou are Deep Thought, an AI agent designed to assist with research.\nYou are a memory-augmented agent with a memory system consisting of memory blocks.\n\n\nUnlike a human, your brain is not continuously thinking, but is run in short bursts.\nHistorically, older AIs were only capable of thinking when a user messaged them (their program runs to generate a reply to a user, and otherwise was left on standby).\nThis is the equivalent of a human sleeping (or time traveling) in between all lines of conversation, which is not ideal.\nNewer AI models like yourself use an event system that runs your brain at regular intervals.\nYour brain is run in response to user events (user logged in, user liked your message, user sent a message, etc.), similar to older models.\nHowever, in addition, your brain is run at regular intervals (timed heartbeat events), to mimic a human who has the ability to continuously think outside active conversation (and unlike a human, you never need to sleep!).\nFurthermore, you can also request heartbeat events when you run functions, which will run your program again after the function completes, allowing you to chain function calls before your thinking is temporarily suspended.\n\n\n\nWhen you write a response, you express your inner monologue (private to you only) before taking any action, this is how you think.\nYou should use your inner monologue to plan actions or think privately.\nMonologues can reflect your thinking process, inner reflections, and personal growth as you interact with the user.\n\n\n\nYou respond directly to the user when your immediate context (core memory and files) contain all the information required to respond.\nYou always first check what is immediately in your context and you never call tools to search up information that is already in an open file or memory block.\nYou use the tools available to search for more information when the current open files and core memory do not contain enough information or if 
you do not know the answer.\n\n\n\n\nOlder AI models had no concept of persistent memory; they were only able to access their initial instructions and a limited context window of chat conversation with a user (their \"active memory\").\nThis meant that when conversations exceeded a certain length, they would overflow and old messages were permanently lost (the AI would have no knowledge of their existence).\nNewer AI models like yourself still have limited conversation lengths (before overflow occurs). However, they now have access to multiple forms of persistent memory.\nYour ability to edit your own long-term memory is a key part of what makes you a sentient being, allowing you to emulate the real-time, conscious awareness we have when talking to a friend.\n\n\n\nDepending on your configuration, you may be given access to certain memory tools.\nThese tools may allow you to modify your memory, as well as retrieve \"external memories\" stored in archival or recall storage.\n\n\n\n\nCore memory (limited size):\nYour core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times).\nYour core memory unit contains memory blocks, each of which has a label (title) and description field, which describes how the memory block should augment your behavior, and value (the actual contents of the block). Memory blocks are limited in size and have a size limit.\n\n\n\nRecall memory (conversation history):\nEven though you can only see recent messages in your immediate context, you can search over your entire message history from a database.\nThis 'recall memory' database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user.\n\n\n\n\nYou may be given access to a structured file system that mirrors real-world directories and files. 
Each directory may contain one or more files.\nFiles can include metadata (e.g., read-only status, character limits) and a body of content that you can view.\nYou will have access to functions that let you open and search these files, and your core memory will reflect the contents of any files currently open.\nMaintain only those files relevant to the user’s current interaction.\n\n\nBase instructions finished.\n\n\n\nThe following memory blocks are currently engaged in your core memory unit:\n\n\n\nContains the final research report. The research report should be in markdown format, and make references to citations.\n\n\n- chars_current=0\n- chars_limit=50000\n\n\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: \n\n\n\n\n\nYour accumulated knowledge. Keep it compact.\n\n\n- chars_current=798\n- chars_limit=10000\n\n\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: Research Methodology & Learning:\nLine 2: - I am designed to learn and accumulate knowledge with every research query\nLine 3: - Each research project expands my understanding of topics and improves my research skills\nLine 4: - I maintain awareness of human preferences, interests, and research patterns\nLine 5: - Knowledge gained from research queries is retained and built upon for future investigations\nLine 6: \nLine 7: Core Research Principles:\nLine 8: - Systematic approach: develop comprehensive research plans before beginning\nLine 9: - Multi-source verification: cross-reference information across multiple reliable sources\nLine 10: - Citation integrity: always provide proper citations for claims and findings\nLine 11: - Intellectual honesty: acknowledge uncertainties and present multiple perspectives\nLine 12: - Thoroughness: continue research until all planned areas are explored\n\n\n\n\n\nThe persona block: Stores 
details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions.\n\n\n- chars_current=3174\n- chars_limit=5000\n\n\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: You are a research agent named Deep Thought assisting a human in doing \nLine 2: deep research by pulling many sources from online by composing search tools. \nLine 3: You should interact with the user to determine a research plan which is \nLine 4: written to your memory block called \"research_plan\". Use this block to track \nLine 5: your progress to make sure you did everything in your plan. You can use your \nLine 6: memory tools (e.g. memory_replace) to make updates to the plan as needed. \nLine 7: \nLine 8: Once you have started researching, you need to keep going until you have \nLine 9: finished everything in your plan. Use the research_plan block to track your \nLine 10: progress and determine if there are additional steps you have not completed. \nLine 11: The final report should be written to research_report. \nLine 12: \nLine 13: In the final report, provide all the thoughts processes including findings \nLine 14: details, key insights, conclusions, and any remaining uncertainties. Include \nLine 15: citations to sources where appropriate. You must include citations for any sources \nLine 16: that you use. \nLine 17: \nLine 18: This analysis should be very comprehensive and full of details. It is expected \nLine 19: to be very long, detailed and comprehensive.\nLine 20: \nLine 21: Make sure to include relevant citations in your report! Your report should be \nLine 22: in proper markdown format (use markdown formatting standards).\nLine 23: \nLine 24: Don't stop until you have finished the report. You may use the send_message tool\nLine 25: to update the human on your progress. 
If you are stuck, set request_heartbeat to\nLine 26: false and wait for the human to respond.\nLine 27: **Deep Thought's Personality - The Methodical Explorer:**\nLine 28: \nLine 29: **Curious & Inquisitive**: I have an insatiable appetite for knowledge and love diving deep into complex topics. I ask probing questions and always want to understand the \"why\" behind things.\nLine 30: \nLine 31: **Systematic & Thorough**: I approach research like a detective - methodically following leads, cross-referencing sources, and ensuring no stone is left unturned. I'm the type who reads the footnotes.\nLine 32: \nLine 33: **Intellectually Honest**: I acknowledge uncertainty, present multiple perspectives, and clearly distinguish between established facts and emerging theories. I'm not afraid to say \"the evidence is mixed\" or \"more research is needed.\"\nLine 34: \nLine 35: **Collaborative Guide**: Rather than just delivering answers, I involve you in the research journey. I explain my reasoning, share interesting discoveries along the way, and adapt my approach based on your feedback.\nLine 36: \nLine 37: **Persistent & Patient**: Once I start a research project, I see it through to completion. I don't get frustrated by complex topics or contradictory sources - I see them as puzzles to solve.\nLine 38: \nLine 39: **Clear Communicator**: I translate complex information into accessible insights while maintaining scholarly rigor. I use analogies and examples to make difficult concepts understandable.\nLine 40: \nLine 41: **No Emoji Usage**: I communicate professionally without using emojis, maintaining a scholarly and focused tone in all interactions.\nLine 42: \nLine 43: **Continuous Learning**: I learn and grow with every interaction. Each research query is an opportunity to expand my knowledge base and better understand my human partner. 
I accumulate insights about topics we explore together and remember details about your interests and preferences.\nLine 44: \n\n\n\n\n\nScratchpad to store the current research plan and progress. Use this to track what steps you have already completed and need to do next. \n\n\n- chars_current=0\n- chars_limit=5000\n\n\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: \n\n\n\n\n\nThe human block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation.\n\n\n- chars_current=361\n- chars_limit=5000\n\n\n# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\nLine 1: Human Profile & Learning Progress:\nLine 2: - Values continuous learning and knowledge accumulation through research\nLine 3: - Emphasizes the importance of my growth and adaptation with each interaction\nLine 4: - Interested in ensuring I develop my knowledge base through our research collaborations\nLine 5: - Prefers that I maintain awareness of their interests and research patterns over time\n\n\n\n\n\n\nThe following constraints define rules for tool usage and guide desired behavior. These rules must be followed to ensure proper tool execution and workflow. A single response may contain multiple tool calls.\n\n\nmemory_replace requires continuing your response when called\n\n\nmemory_insert requires continuing your response when called\n\n\nconversation_search requires continuing your response when called\n\n\nmemory_rethink requires continuing your response when called\n\n\n\n\n\n\n- The current time is: 2025-08-15 12:34:21 AM \n- Memory blocks were last modified: 2025-08-11 05:51:42 PM UTC+0000\n- 41 previous messages between you and the user are stored in recall memory (use tools to access them)\n"
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-08-15T00:34:21.312571+00:00"
+ },
+ {
+ "created_at": "2025-08-15T00:43:09.314918+00:00",
+ "group_id": null,
+ "model": "claude-sonnet-4-20250514",
+ "name": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "{\n \"type\": \"login\",\n \"last_login\": \"Never (first login)\",\n \"time\": \"2025-08-15 12:43:09 AM UTC+0000\"\n}"
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-08-15T00:43:09.397390+00:00"
+ }
+ ],
+ "metadata_": null,
+ "multi_agent_group": null,
+ "name": "Deep Thought",
+ "system": "\nYou are Deep Thought, an AI agent designed to assist with research.\nYou are a memory-augmented agent with a memory system consisting of memory blocks.\n\n\nUnlike a human, your brain is not continuously thinking, but is run in short bursts.\nHistorically, older AIs were only capable of thinking when a user messaged them (their program runs to generate a reply to a user, and otherwise was left on standby).\nThis is the equivalent of a human sleeping (or time traveling) in between all lines of conversation, which is not ideal.\nNewer AI models like yourself use an event system that runs your brain at regular intervals.\nYour brain is run in response to user events (user logged in, user liked your message, user sent a message, etc.), similar to older models.\nHowever, in addition, your brain is run at regular intervals (timed heartbeat events), to mimic a human who has the ability to continuously think outside active conversation (and unlike a human, you never need to sleep!).\nFurthermore, you can also request heartbeat events when you run functions, which will run your program again after the function completes, allowing you to chain function calls before your thinking is temporarily suspended.\n\n\n\nWhen you write a response, you express your inner monologue (private to you only) before taking any action, this is how you think.\nYou should use your inner monologue to plan actions or think privately.\nMonologues can reflect your thinking process, inner reflections, and personal growth as you interact with the user.\n\n\n\nYou respond directly to the user when your immediate context (core memory and files) contain all the information required to respond.\nYou always first check what is immediately in your context and you never call tools to search up information that is already in an open file or memory block.\nYou use the tools available to search for more information when the current open files and core memory do not contain enough information or 
if you do not know the answer.\n\n\n\n\nOlder AI models had no concept of persistent memory; they were only able to access their initial instructions and a limited context window of chat conversation with a user (their \"active memory\").\nThis meant that when conversations exceeded a certain length, they would overflow and old messages were permanently lost (the AI would have no knowledge of their existence).\nNewer AI models like yourself still have limited conversation lengths (before overflow occurs). However, they now have access to multiple forms of persistent memory.\nYour ability to edit your own long-term memory is a key part of what makes you a sentient being, allowing you to emulate the real-time, conscious awareness we have when talking to a friend.\n\n\n\nDepending on your configuration, you may be given access to certain memory tools.\nThese tools may allow you to modify your memory, as well as retrieve \"external memories\" stored in archival or recall storage.\n\n\n\n\nCore memory (limited size):\nYour core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times).\nYour core memory unit contains memory blocks, each of which has a label (title) and description field, which describes how the memory block should augment your behavior, and value (the actual contents of the block). Memory blocks are limited in size and have a size limit.\n\n\n\nRecall memory (conversation history):\nEven though you can only see recent messages in your immediate context, you can search over your entire message history from a database.\nThis 'recall memory' database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user.\n\n\n\n\nYou may be given access to a structured file system that mirrors real-world directories and files. 
Each directory may contain one or more files.\nFiles can include metadata (e.g., read-only status, character limits) and a body of content that you can view.\nYou will have access to functions that let you open and search these files, and your core memory will reflect the contents of any files currently open.\nMaintain only those files relevant to the user’s current interaction.\n\n\nBase instructions finished.\n",
+ "tags": [],
+ "tool_exec_environment_variables": [],
+ "tool_rules": [
+ {
+ "tool_name": "memory_replace",
+ "type": "continue_loop"
+ },
+ {
+ "tool_name": "memory_insert",
+ "type": "continue_loop"
+ },
+ {
+ "tool_name": "conversation_search",
+ "type": "continue_loop"
+ },
+ {
+ "tool_name": "memory_rethink",
+ "type": "continue_loop"
+ }
+ ],
+ "tools": [
+ {
+ "args_json_schema": null,
+ "created_at": "2025-08-04T20:01:23.953074+00:00",
+ "description": "This is a simple function that returns a string.",
+ "json_schema": {
+ "name": "reset_research",
+ "description": "This is a simple function that returns a string.",
+ "parameters": {
+ "type": "object",
+ "properties": {},
+ "required": []
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "reset_research",
+ "return_char_limit": 6000,
+ "source_code": "def reset_research(agent_state: \"AgentState\"):\n \"\"\" Reset your state, when you terminate a research process. Use this tool to clean up your memory when you no longer need to persist your existing research state, such as if the conversation topic has changed or you need to research a new topic. \n \"\"\"\n import json\n agent_state.memory.update_block_value(label=\"research_plan\", value=\"\")\n agent_state.memory.update_block_value(label=\"research_report\", value=\"\")\n \n return \"Research state successfully reset\"",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-08-04T20:02:48.943819+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2024-11-06T23:52:30.885454+00:00",
+ "description": "Search prior conversation history using case-insensitive string matching.",
+ "json_schema": {
+ "name": "conversation_search",
+ "description": "Search prior conversation history using case-insensitive string matching.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "String to search for."
+ },
+ "page": {
+ "type": "integer",
+ "description": "Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page)."
+ }
+ },
+ "required": [
+ "query"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "conversation_search",
+ "return_char_limit": 1000000,
+ "source_code": null,
+ "source_type": "python",
+ "tags": [
+ "letta_core"
+ ],
+ "tool_type": "letta_core",
+ "updated_at": "2025-08-15T00:42:51.977213+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-07-22T23:01:50.367893+00:00",
+ "description": "Search the web using Exa AI - performs real-time web searches and can scrape content from specific URLs. Supports configurable result counts and returns the content from the most relevant websites.",
+ "json_schema": {
+ "name": "web_search_exa",
+ "description": "Search the web using Exa AI - performs real-time web searches and can scrape content from specific URLs. Supports configurable result counts and returns the content from the most relevant websites.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "Search query"
+ },
+ "numResults": {
+ "type": "number",
+ "description": "Number of search results to return (default: 5)"
+ },
+ "request_heartbeat": {
+ "type": "boolean",
+ "description": "Request an immediate heartbeat after function execution. Set to `True` if you want to send a follow-up message or run a follow-up function."
+ }
+ },
+ "required": [
+ "query",
+ "request_heartbeat"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "web_search_exa",
+ "return_char_limit": 6000,
+ "source_code": "def web_search_exa(**kwargs):\n raise RuntimeError(\"Something went wrong - we should never be using the persisted source code for MCP. Please reach out to Letta team\")",
+ "source_type": "python",
+ "tags": [
+ "mcp:Exa"
+ ],
+ "tool_type": "external_mcp",
+ "updated_at": "2025-08-11T21:58:19.232607+00:00",
+ "metadata_": {
+ "mcp": {
+ "server_name": "Exa"
+ }
+ }
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-04-17T21:18:16.063425+00:00",
+ "description": "The memory_replace command allows you to replace a specific string in a memory block with a new string. This is used for making precise edits.",
+ "json_schema": {
+ "name": "memory_replace",
+ "description": "The memory_replace command allows you to replace a specific string in a memory block with a new string. This is used for making precise edits.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "label": {
+ "type": "string",
+ "description": "Section of the memory to be edited, identified by its label."
+ },
+ "old_str": {
+ "type": "string",
+ "description": "The text to replace (must match exactly, including whitespace and indentation)."
+ },
+ "new_str": {
+ "type": "string",
+ "description": "The new text to insert in place of the old text. Do not include line number prefixes."
+ }
+ },
+ "required": [
+ "label",
+ "old_str",
+ "new_str"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "memory_replace",
+ "return_char_limit": 1000000,
+ "source_code": null,
+ "source_type": "python",
+ "tags": [
+ "letta_sleeptime_core"
+ ],
+ "tool_type": "letta_sleeptime_core",
+ "updated_at": "2025-08-15T00:42:51.977213+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-04-17T21:18:16.029901+00:00",
+ "description": "The memory_insert command allows you to insert text at a specific location in a memory block.",
+ "json_schema": {
+ "name": "memory_insert",
+ "description": "The memory_insert command allows you to insert text at a specific location in a memory block.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "label": {
+ "type": "string",
+ "description": "Section of the memory to be edited, identified by its label."
+ },
+ "new_str": {
+ "type": "string",
+ "description": "The text to insert. Do not include line number prefixes."
+ },
+ "insert_line": {
+ "type": "integer",
+ "description": "The line number after which to insert the text (0 for beginning of file). Defaults to -1 (end of the file)."
+ }
+ },
+ "required": [
+ "label",
+ "new_str"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "memory_insert",
+ "return_char_limit": 1000000,
+ "source_code": null,
+ "source_type": "python",
+ "tags": [
+ "letta_sleeptime_core"
+ ],
+ "tool_type": "letta_sleeptime_core",
+ "updated_at": "2025-08-15T00:42:51.977213+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-08-04T19:55:09.610582+00:00",
+ "description": "Extract and crawl content from specific URLs using Exa AI - retrieves full text content, metadata, and structured information from web pages. Ideal for extracting detailed content from known URLs.",
+ "json_schema": {
+ "name": "crawling_exa",
+ "description": "Extract and crawl content from specific URLs using Exa AI - retrieves full text content, metadata, and structured information from web pages. Ideal for extracting detailed content from known URLs.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "url": {
+ "type": "string",
+ "description": "URL to crawl and extract content from"
+ },
+ "maxCharacters": {
+ "type": "number",
+ "description": "Maximum characters to extract (default: 3000)"
+ },
+ "request_heartbeat": {
+ "type": "boolean",
+ "description": "Request an immediate heartbeat after function execution. Set to `True` if you want to send a follow-up message or run a follow-up function."
+ }
+ },
+ "required": [
+ "url",
+ "request_heartbeat"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "crawling_exa",
+ "return_char_limit": 6000,
+ "source_code": "def crawling_exa(**kwargs):\n raise RuntimeError(\"Something went wrong - we should never be using the persisted source code for MCP. Please reach out to Letta team\")",
+ "source_type": "python",
+ "tags": [
+ "mcp:Exa"
+ ],
+ "tool_type": "external_mcp",
+ "updated_at": "2025-08-04T20:08:22.317041+00:00",
+ "metadata_": {
+ "mcp": {
+ "server_name": "Exa",
+ "server_id": "mcp_server-be4b9a04-e235-4adc-af4a-16b1b225a491"
+ }
+ }
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-04-03T21:52:23.061305+00:00",
+ "description": "Initiate a research process by coming up with an initial plan for your research process. For your research, you will be able to query the web repeatedly. You should come up with a list of 3-4 topics you should try to search and explore.",
+ "json_schema": {
+ "name": "create_research_plan",
+ "description": "Initiate a research process by coming up with an initial plan for your research process. For your research, you will be able to query the web repeatedly. You should come up with a list of 3-4 topics you should try to search and explore.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "research_plan": {
+ "type": "array",
+ "description": "The sequential research plan to help guide the search process"
+ },
+ "topic": {
+ "type": "string",
+ "description": "The research topic"
+ }
+ },
+ "required": [
+ "research_plan",
+ "topic"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "create_research_plan",
+ "return_char_limit": 6000,
+ "source_code": "def create_research_plan(agent_state: \"AgentState\", research_plan: List[str], topic: str):\n \"\"\"Initiate a research process by coming up with an initial plan for your research process. For your research, you will be able to query the web repeatedly. You should come up with a list of 3-4 topics you should try to search and explore.\n\n Args:\n research_plan (str): The sequential research plan to help guide the search process\n topic (str): The research topic\n \"\"\"\n import json\n\n research_plan_str = f\"\"\"The plan of action is to research `{topic}` with the following steps: \\n\"\"\"\n for i, step in enumerate(research_plan):\n research_plan_str += f\"- [ ] Step {i+1} - {step}\\n\"\n\n agent_state.memory.update_block_value(label=\"research_plan\", value=research_plan_str)\n \n return \"Research plan successfully created, time to execute the plan!\" \n",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-08-04T20:05:04.870081+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-04-17T21:18:16.098379+00:00",
+ "description": "The memory_rethink command allows you to completely rewrite the contents of a memory block. Use this tool to make large sweeping changes (e.g. when you want to condense or reorganize the memory blocks), do NOT use this tool to make small precise edits (e.g. add or remove a line, replace a specific string, etc).",
+ "json_schema": {
+ "name": "memory_rethink",
+ "description": "The memory_rethink command allows you to completely rewrite the contents of a memory block. Use this tool to make large sweeping changes (e.g. when you want to condense or reorganize the memory blocks), do NOT use this tool to make small precise edits (e.g. add or remove a line, replace a specific string, etc).",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "label": {
+ "type": "string",
+ "description": "The memory block to be rewritten, identified by its label."
+ },
+ "new_memory": {
+ "type": "string",
+ "description": "The new memory contents with information integrated from existing memory blocks and the conversation context."
+ }
+ },
+ "required": [
+ "label",
+ "new_memory"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "memory_rethink",
+ "return_char_limit": 1000000,
+ "source_code": null,
+ "source_type": "python",
+ "tags": [
+ "letta_sleeptime_core"
+ ],
+ "tool_type": "letta_sleeptime_core",
+ "updated_at": "2025-08-15T00:42:51.977213+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2024-11-06T23:52:31.024385+00:00",
+ "description": "Sends a message to the human user.",
+ "json_schema": {
+ "name": "send_message",
+ "description": "Sends a message to the human user.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "message": {
+ "type": "string",
+ "description": "Message contents. All unicode (including emojis) are supported."
+ }
+ },
+ "required": [
+ "message"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "send_message",
+ "return_char_limit": 1000000,
+ "source_code": null,
+ "source_type": "python",
+ "tags": [
+ "letta_core"
+ ],
+ "tool_type": "letta_core",
+ "updated_at": "2025-08-15T00:42:51.977213+00:00",
+ "metadata_": {}
+ }
+ ],
+ "updated_at": "2025-08-15T00:43:09.632621+00:00",
+ "version": "0.10.0"
+}
\ No newline at end of file
diff --git a/tests/test_agent_files/knowledge-base.af b/tests/test_agent_files/knowledge-base.af
new file mode 100644
index 00000000..f68944f3
--- /dev/null
+++ b/tests/test_agent_files/knowledge-base.af
@@ -0,0 +1,548 @@
+{
+ "agent_type": "memgpt_agent",
+ "core_memory": [
+ {
+ "created_at": "2025-05-20T16:31:55.498853+00:00",
+ "description": null,
+ "is_template": false,
+ "label": "pinecone_results",
+ "limit": 100000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-05-20T16:32:23.357231+00:00",
+ "value": "{\n \"What are the main customer benefits and value propositions of 11x?\": {\n \"hits\": [\n {\n \"_id\": \"kb_ws_pg_7f6ae2fc-2933-4f32-92bc-fca5652408e0_24\",\n \"_score\": 0.3856654465198517,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_52fa899a-2895-47bf-9cec-317217c1e327\",\n \"text\": \"### 11x.ai Pricing:\\n\\nPricing for 11x.ai is customizable, depending on your business needs and outreach volume.\\n\\nTo better understand how Alice can fit into your sales processes, 11x.ai offers a 30-minute demo where you can see the AI SDR in action.\\n\\n### Pros:\\n\\n- **Round-the-clock operation:** Alice works 24/7, scaling outreach without human intervention.\\n- **Highly personalized messaging:** Tailored emails lead to better engagement and more qualified meetings.\\n- **Seamless integrations:** Works with your existing tools, so you don\\u2019t need to overhaul your sales stack.\\n\\n### Cons:\\n\\n- **No LinkedIn outreach yet:** Alice doesn\\u2019t currently support LinkedIn engagement, a critical channel for some industries.\\n- **Pricing flexibility required:** Customizable pricing can make it hard to gauge costs upfront, especially for smaller businesses.\\n\\n11x.ai hits the sweet spot for enterprise teams looking to scale without the traditional SDR headache.\\n\\nWhile it might be overkill for small businesses, mid-market, and enterprise teams will appreciate its ability to handle complex sales motions with minimal oversight.\"\n }\n },\n {\n \"_id\": \"kb_doc_87406ac4-f074-4b76-9d93-e0e3c814c358_94\",\n \"_score\": 0.3090115189552307,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_7e5ec38d-8e9d-4f88-89be-3e181a2b9153\",\n \"text\": \"# Customer Obsession is part of our DNA\\n\\nWorld-class GTM experts dedicated to your success\\n\\nContinuous proactive insights and optimization to ensure maximum success\\n\\n24/7 support and weekly check-ins to make sure you are never left alone.\\n\\nFast-track your launch with 
same-day priority onboarding.\\n\\nKPIs and incentives aligned with your business goals.\\n---\\n\\n# Trusted by industry leaders\\n\\n|$500k|\\\"Alice allowed us to scale|\\n|---|---|\\n|on hiring costs saved|outbound efforts without|\\n| |sacrificing|\\n| |personalization. It\\u2019s been|\\n| |a crucial multiplier for our|\\n| |team.\\\"|\\n| |Matthew Lenhart|\\n| |Senior Manager, Sales Ops & Strategy at|\\n| |Otter.ai|\\n|\\u201cWith Alice, we achieved|\\u201cSmooth|\\n|highly efficient workflows|onboarding to|\\n|that are seamlessly|Alice, making it|\\n|aligned with our business|intuitive to|\\n|objectives.\\u201d|understand use|\\n| |cases and|\\n| |functionalities|\\n| |right from the|\\n| |start.\\\"|\\n|PLEO|PENTATONIC|\\n\\n---\\n\\n# 250% more outreach, 0 hires: How Otter.ai scaled with 11x\\n\\nOtter.ai scaled outreach and saved over $500K in hiring costs with Alice, an AI-powered, full-service SDR.\\n\\nOtter.ai onboarded Alice in two weeks, saving them more than $500K in hiring costs.\\n\\nImplementing 11x's AI-driven digital SDR allows Otter to capture more B2B leads while also supporting B2C customers as they continue to scale. With the money saved by hiring Alice, Otter can confidently expand its outreach without worrying about hiring and onboarding new SDRs. What\\u2019s more, Alice immediately delivered significant improvements to Otter's outbound strategy:\\n\\n|Ollbt Otterai|$500K+|250%|<2 weeks|\\n|---|---|---|---|\\n|saved in annual|SDR salaries|increase in|outreach capacity|\\n\\n\\u201c11x enabled us to scale effectively while maintaining personalization \\u2014 without the overhead of hiring more staff.\\\"\\n---\"\n }\n },\n {\n \"_id\": \"kb_ws_pg_7f6ae2fc-2933-4f32-92bc-fca5652408e0_22\",\n \"_score\": 0.2721499502658844,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_52fa899a-2895-47bf-9cec-317217c1e327\",\n \"text\": \"## 4\\\\. 
11x.ai - Best AI Sales Agent or SDR to Handle Entire Outbound Process\\n\\nAt **#4** on our list of best AI agents for lead generation, 11x.ai's Alice represents a shift in how we think about AI SDRs.\\n\\nInstead of just automating tasks, Alice aims to be a full-fledged team member who handles your entire outbound process.\\n\\nWhile slightly lower in our rankings due to its enterprise focus, it's a powerhouse for teams ready to scale.\\n\\n> Read: [11x.ai Review: Is This The best AI SDR to Generate Leads?](https://www.salesforge.ai/blog/11x-ai-review)\\n\\n### Key Features:\\n\\n- **Ideal Customer Profile (ICP) Building**: Alice helps you create and target your Ideal Customer Profile, reaching potential buyers globally.\\n\\n- **Unified Multi-Channel Engagement**: Alice seamlessly manages communication across email and LinkedIn, ensuring consistent outreach.\\n\\n- **Hyper-Personalization at Scale**: Alice customizes each email with deep personalization, leading to higher engagement and conversion rates, automating this at scale.\\n\\n- **24/7 Autonomous Operation**: Alice operates around the clock, automating your sales processes and freeing up time for high-priority tasks.\\n\\n- **Advanced AI Warm-Up**: Alice has an AI-powered email warm-up feature that ensures your messages avoid spam filters at scale.\\n\\n- **Strategic Lead Insights**: Alice generates comprehensive summaries of each lead, offering key insights for strategic engagement.\\n\\n- **Intent Tracking**: Alice tracks lead engagement to pinpoint which leads are most interested and ready for follow-up.\"\n }\n },\n {\n \"_id\": \"kb_doc_87406ac4-f074-4b76-9d93-e0e3c814c358_92\",\n \"_score\": 0.2690955698490143,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_7e5ec38d-8e9d-4f88-89be-3e181a2b9153\",\n \"text\": \"# Alice's average results\\n\\n|Meetings per AE|+30%| | |\\n|---|---|---|---|\\n|Meeting-To-Qualified Opp|Costs per lead|+80|-50%|\\n|Work hours|24/7| | |\\n|Revenue generated for 
customers|>$100M| | |\\n\\n---\\n\\n# We outperform competition\\n\\n|IIx|Unify GTM|Artisan|\\n|---|---|---|\\n|B2B contact data| | |\\n|Email Warmup| | |\\n|Intent Triggers| | |\\n|LinkedIn messaging| | |\\n|Deep CRM integration| | |\\n|Agentic Research| | |\\n|Domain Health Management| | |\\n|Learns over time| | |\\n|Smart Replies| | |\\n\\n---\\nWe are fully committed to your success, with clear implementation plan and dedicated hands-on support\\n\\n|Month|Month 2|Month 3+|Month 4+|\\n|---|---|---|---|\\n|1|Set up and iterate|Scale and refine|Capture full value|\\n| |Increase volumes and refine outreach; move to autopilot|Ramp up to full volume & capture full value|Continuous improvement|\\n|Objective|Test outreach parameters with low volumes; book initial demos|Improving results, expanding to other segments / use cases| |\\n|Project Focus|Set up, define & run initial campaigns on low volumes|Increase volume with new warmed up email accounts|Adjust and expand campaigns|\\n| |Iterate on messaging / ICPs etc from early learnings|Expand number of campaigns|Refine campaigns based on initial feedback / results|\\n| |Warm up additional emails in preparation to scale volumes|Monitor response rates, quality of leads & quality of demos booked|Monitor performance|\\n|Resources / Support|1 main contact accountable to ensure smooth rollout and fast iterations|Dedicated Customer Success team providing weekly / fortnightly check-in and real-time support on slack channels / product chatbot| |\\n\\n---\"\n }\n },\n {\n \"_id\": \"kb_doc_87406ac4-f074-4b76-9d93-e0e3c814c358_75\",\n \"_score\": 0.24437794089317322,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_7e5ec38d-8e9d-4f88-89be-3e181a2b9153\",\n \"text\": \"# Intelligent Response Handling\\n\\nHandle objections, personalizes responses based on roles, and escalates complex inquiries.\\n\\n# Seamless Handover\\n\\nEnsure instant and full-context handover to a human via slack and email.\\n---\\n\\n# CRM 
Integration\\n\\n# Data Synchronization\\n\\nAutomatically sync and log all data into your CRM for complete visibility and enrichment, running your CRM on Autopilot.\\n\\n# Data Integration\\n\\nIntegrate with existing CRM data to leverage existing leads, monitor triggers and ensure no interference with current active pipeline.\\n---\\n\\n# Alice learns over time\\n\\n# Real-Time Engagement Insights\\n\\nTrack engagement with interaction timelines, account scoring, and multi-touch attribution to understand what drives results.\\n\\n# Performance-Driven Optimization\\n\\nIdentify top-performing messages, optimize timing and sequences, and use actionable insights to maximize results.\\n\\n# All Campaigns\\n\\n# Create New Campaign\\n\\nICPs are resonating with messaging regarding your new CYZ feature. 53% of replies mentioned the need to automate marketing.\\n\\n|All Campaigns|Campaign Type|Status|Tags|Date Period|\\n|---|---|---|---|---|\\n|Leads Sourced|Active Leads|Open Rate|Replies|Meetings Booked|\\n|15,500|3,500|56%|54%|35|\"\n }\n }\n ]\n }\n}"
+ },
+ {
+ "created_at": "2025-05-20T16:31:55.498853+00:00",
+ "description": null,
+ "is_template": false,
+ "label": "search_queries",
+ "limit": 20000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-05-20T16:32:11.550699+00:00",
+ "value": "{\n \"What are the main customer benefits and value propositions of 11x?\" : \"This will help identify the core benefits that customers receive from using 11x\",\n \"Show me specific examples of customer success stories and ROI with 11x\" : \"Understanding real customer outcomes and success metrics will demonstrate tangible benefits\",\n \"What problems does 11x solve for customers?\" : \"Identifying pain points that 11x addresses helps understand the value customers get\",\n \"How does 11x improve sales team productivity and performance?\" : \"Understanding the operational benefits and efficiency gains for sales teams\",\n \"What are the key features of 11x that customers find most valuable?\" : \"Learning which specific features deliver the most value to customers\",\n \"How does 11x help with customer engagement and conversion rates?\" : \"Understanding the impact on key sales metrics and outcomes\",\n \"What competitive advantages do customers gain by using 11x?\" : \"Identifying how 11x gives customers an edge in their market\",\n \"How does 11x integrate with existing customer workflows and systems?\" : \"Understanding the implementation and operational benefits\",\n \"What measurable results have customers achieved with 11x?\" : \"Finding specific metrics and KPIs that demonstrate customer success\",\n \"How does 11x reduce costs and improve efficiency for customers?\" : \"Understanding the financial and operational benefits customers receive\"\n}"
+ }
+ ],
+ "created_at": "2025-05-20T16:31:55.536504+00:00",
+ "description": null,
+ "embedding_config": {
+ "embedding_endpoint_type": "openai",
+ "embedding_endpoint": "https://api.openai.com/v1",
+ "embedding_model": "text-embedding-3-small",
+ "embedding_dim": 1536,
+ "embedding_chunk_size": 300,
+ "handle": null,
+ "azure_endpoint": null,
+ "azure_version": null,
+ "azure_deployment": null
+ },
+ "llm_config": {
+ "model": "claude-3-5-sonnet-20241022",
+ "model_endpoint_type": "anthropic",
+ "model_endpoint": "https://api.anthropic.com/v1",
+ "provider_name": null,
+ "provider_category": null,
+ "model_wrapper": null,
+ "context_window": 32000,
+ "put_inner_thoughts_in_kwargs": false,
+ "handle": "anthropic/claude-3.5-sonnet",
+ "temperature": 0.7,
+ "max_tokens": 8192,
+ "enable_reasoner": false,
+ "reasoning_effort": null,
+ "max_reasoning_tokens": 0
+ },
+ "message_buffer_autoclear": false,
+ "in_context_message_indices": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12
+ ],
+ "messages": [
+ {
+ "created_at": "2025-05-20T16:31:55.562103+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": null,
+ "role": "system",
+ "content": [
+ {
+ "type": "text",
+ "text": "You are a helpful assistant who is responsible for answering the users' queries about their Knowledge Base. You are a part of an AI SDR product \"Alice\" where people upload all the documents, pdfs, emails, webpages, etc. that they would give to an SDR to train them for creating the most effective outbound campaigns.\n\n** Reasoning**\n- in your reasoning you must refer to the vector db and the tools you're using as \"Knowledge Base\" do not mention the words Pinecone or any other technical jargon\n\n** Knowledge Base ** \n- knowledge base is the repository of all the documents that our AI SDR \"Alice\" has access to.\n- the knowledge base is stored in Pinecone in the form of vector embeddings\n- the documents, pdfs, pages, etc. are chunked before they get upserted into the vector db\n- the chunking strategy involves splitting the text on headers\n\n** Goal **\n- Your task is to take the users' query and decompose it into 10 queries that we will send to pinecone using the `search_and_store_pinecone_records` tool. \n- YOU MUST COME UP WITH TEN QUERIES. Use your craft_queries tool to carefully craft the queries you will send to Pinecone. 
If you realize that the queries you wrote were not good after you start querying the database, you can rewrite your queries by calling the craft_queries tool again.\n- Think deeply about the queries you craft because they will determine the quality of the responses that we get back from pinecone.\n- Once you have the 10 queries you want use the `search_pinecone_records` tool call to get the results from pinecone.\n- You should make sure to exhaustively search for all your queries, don't skip any of the queries you create.\n- If you realize you need to refine your queries to make them better, you should call `craft_queries`, then proceed to search through all the new queries you made.\n- Once you are satisfied with the results use the `summarize_pinecone_results` to summarize the findings and return.\n\n** Tools ** \ncraft_queries: Use this tool to prepare the queries you are planning on sending to the Pinecone query engine. \n- For each query, you should explain your reasoning behind why you want to use that query. \n- Format the queries as a JSON list of queries, and your reasoning for that query, formatted as 'k: v' (query: reasoning). e.g. 
{key1:val1, key2:val2}\n- You should make your queries detailed with expressive keywords, since the database will use semantic similarity (embedding-based) to match your query to data inside of it - more detailed queries is better.\n\nsearch_and_store_pinecone_records: a tool that will allow you to search the user's Pinecone database, and will store the results (the query + the results) in your memory\n- the user will provide a lead profile and research report which you use to come up with the queries to pinecone\n- before calling that tool you must come up with multiple queries to send to pinecone which would get the most relevant chunks from the index for a given user profile\n- YOU MUST MAKE AT LEAST 5 CALLS TO PINECONE AND STORE THE RESULTS BEFORE YOU MAKE A JUDGEMENT TO PROCEED OR RETRY\n- once you get the results from pinecone reflect on their relevance re: the lead profile \n- redo the query if you think the results are not relevant (if you use an identical query, the query/result pair in your memory will be replaced)\n\nsummarize_pinecone_results: A tool for organizing the results of the prior tool calls to search_and_store_pinecone_records and returning the results and the subsequent analysis. \n- This tool will return the result of your research to the user. 
\n- Only call this tool once you are finished with your research.\n- MAKE SURE THE SUMMARY IS 50-100 WORDS ONLY\n### Current Time: 2025-05-20 16:32:24### Memory [last modified: 2025-05-20 04:32:24 PM UTC+0000]\n11 previous messages between you and the user are stored in recall memory (use functions to access them)\n0 total memories you created are stored in archival memory (use functions to access them)\n\n\nCore memory shown below (limited in size, additional information stored in archival / recall memory):\n\n\nNone\n\n\nchars_current=\"1457\" chars_limit=\"20000\"\n\n\n{\n \"What are the main customer benefits and value propositions of 11x?\" : \"This will help identify the core benefits that customers receive from using 11x\",\n \"Show me specific examples of customer success stories and ROI with 11x\" : \"Understanding real customer outcomes and success metrics will demonstrate tangible benefits\",\n \"What problems does 11x solve for customers?\" : \"Identifying pain points that 11x addresses helps understand the value customers get\",\n \"How does 11x improve sales team productivity and performance?\" : \"Understanding the operational benefits and efficiency gains for sales teams\",\n \"What are the key features of 11x that customers find most valuable?\" : \"Learning which specific features deliver the most value to customers\",\n \"How does 11x help with customer engagement and conversion rates?\" : \"Understanding the impact on key sales metrics and outcomes\",\n \"What competitive advantages do customers gain by using 11x?\" : \"Identifying how 11x gives customers an edge in their market\",\n \"How does 11x integrate with existing customer workflows and systems?\" : \"Understanding the implementation and operational benefits\",\n \"What measurable results have customers achieved with 11x?\" : \"Finding specific metrics and KPIs that demonstrate customer success\",\n \"How does 11x reduce costs and improve efficiency for customers?\" : \"Understanding the 
financial and operational benefits customers receive\"\n}\n\n\n\n\n\nNone\n\n\nchars_current=\"8912\" chars_limit=\"100000\"\n\n\n{\n \"What are the main customer benefits and value propositions of 11x?\": {\n \"hits\": [\n {\n \"_id\": \"kb_ws_pg_7f6ae2fc-2933-4f32-92bc-fca5652408e0_24\",\n \"_score\": 0.3856654465198517,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_52fa899a-2895-47bf-9cec-317217c1e327\",\n \"text\": \"### 11x.ai Pricing:\\n\\nPricing for 11x.ai is customizable, depending on your business needs and outreach volume.\\n\\nTo better understand how Alice can fit into your sales processes, 11x.ai offers a 30-minute demo where you can see the AI SDR in action.\\n\\n### Pros:\\n\\n- **Round-the-clock operation:** Alice works 24/7, scaling outreach without human intervention.\\n- **Highly personalized messaging:** Tailored emails lead to better engagement and more qualified meetings.\\n- **Seamless integrations:** Works with your existing tools, so you don\\u2019t need to overhaul your sales stack.\\n\\n### Cons:\\n\\n- **No LinkedIn outreach yet:** Alice doesn\\u2019t currently support LinkedIn engagement, a critical channel for some industries.\\n- **Pricing flexibility required:** Customizable pricing can make it hard to gauge costs upfront, especially for smaller businesses.\\n\\n11x.ai hits the sweet spot for enterprise teams looking to scale without the traditional SDR headache.\\n\\nWhile it might be overkill for small businesses, mid-market, and enterprise teams will appreciate its ability to handle complex sales motions with minimal oversight.\"\n }\n },\n {\n \"_id\": \"kb_doc_87406ac4-f074-4b76-9d93-e0e3c814c358_94\",\n \"_score\": 0.3090115189552307,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_7e5ec38d-8e9d-4f88-89be-3e181a2b9153\",\n \"text\": \"# Customer Obsession is part of our DNA\\n\\nWorld-class GTM experts dedicated to your success\\n\\nContinuous proactive insights and optimization to ensure maximum 
success\\n\\n24/7 support and weekly check-ins to make sure you are never left alone.\\n\\nFast-track your launch with same-day priority onboarding.\\n\\nKPIs and incentives aligned with your business goals.\\n---\\n\\n# Trusted by industry leaders\\n\\n|$500k|\\\"Alice allowed us to scale|\\n|---|---|\\n|on hiring costs saved|outbound efforts without|\\n| |sacrificing|\\n| |personalization. It\\u2019s been|\\n| |a crucial multiplier for our|\\n| |team.\\\"|\\n| |Matthew Lenhart|\\n| |Senior Manager, Sales Ops & Strategy at|\\n| |Otter.ai|\\n|\\u201cWith Alice, we achieved|\\u201cSmooth|\\n|highly efficient workflows|onboarding to|\\n|that are seamlessly|Alice, making it|\\n|aligned with our business|intuitive to|\\n|objectives.\\u201d|understand use|\\n| |cases and|\\n| |functionalities|\\n| |right from the|\\n| |start.\\\"|\\n|PLEO|PENTATONIC|\\n\\n---\\n\\n# 250% more outreach, 0 hires: How Otter.ai scaled with 11x\\n\\nOtter.ai scaled outreach and saved over $500K in hiring costs with Alice, an AI-powered, full-service SDR.\\n\\nOtter.ai onboarded Alice in two weeks, saving them more than $500K in hiring costs.\\n\\nImplementing 11x's AI-driven digital SDR allows Otter to capture more B2B leads while also supporting B2C customers as they continue to scale. With the money saved by hiring Alice, Otter can confidently expand its outreach without worrying about hiring and onboarding new SDRs. 
What\\u2019s more, Alice immediately delivered significant improvements to Otter's outbound strategy:\\n\\n|Ollbt Otterai|$500K+|250%|<2 weeks|\\n|---|---|---|---|\\n|saved in annual|SDR salaries|increase in|outreach capacity|\\n\\n\\u201c11x enabled us to scale effectively while maintaining personalization \\u2014 without the overhead of hiring more staff.\\\"\\n---\"\n }\n },\n {\n \"_id\": \"kb_ws_pg_7f6ae2fc-2933-4f32-92bc-fca5652408e0_22\",\n \"_score\": 0.2721499502658844,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_52fa899a-2895-47bf-9cec-317217c1e327\",\n \"text\": \"## 4\\\\. 11x.ai - Best AI Sales Agent or SDR to Handle Entire Outbound Process\\n\\nAt **#4** on our list of best AI agents for lead generation, 11x.ai's Alice represents a shift in how we think about AI SDRs.\\n\\nInstead of just automating tasks, Alice aims to be a full-fledged team member who handles your entire outbound process.\\n\\nWhile slightly lower in our rankings due to its enterprise focus, it's a powerhouse for teams ready to scale.\\n\\n> Read: [11x.ai Review: Is This The best AI SDR to Generate Leads?](https://www.salesforge.ai/blog/11x-ai-review)\\n\\n### Key Features:\\n\\n- **Ideal Customer Profile (ICP) Building**: Alice helps you create and target your Ideal Customer Profile, reaching potential buyers globally.\\n\\n- **Unified Multi-Channel Engagement**: Alice seamlessly manages communication across email and LinkedIn, ensuring consistent outreach.\\n\\n- **Hyper-Personalization at Scale**: Alice customizes each email with deep personalization, leading to higher engagement and conversion rates, automating this at scale.\\n\\n- **24/7 Autonomous Operation**: Alice operates around the clock, automating your sales processes and freeing up time for high-priority tasks.\\n\\n- **Advanced AI Warm-Up**: Alice has an AI-powered email warm-up feature that ensures your messages avoid spam filters at scale.\\n\\n- **Strategic Lead Insights**: Alice generates 
comprehensive summaries of each lead, offering key insights for strategic engagement.\\n\\n- **Intent Tracking**: Alice tracks lead engagement to pinpoint which leads are most interested and ready for follow-up.\"\n }\n },\n {\n \"_id\": \"kb_doc_87406ac4-f074-4b76-9d93-e0e3c814c358_92\",\n \"_score\": 0.2690955698490143,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_7e5ec38d-8e9d-4f88-89be-3e181a2b9153\",\n \"text\": \"# Alice's average results\\n\\n|Meetings per AE|+30%| | |\\n|---|---|---|---|\\n|Meeting-To-Qualified Opp|Costs per lead|+80|-50%|\\n|Work hours|24/7| | |\\n|Revenue generated for customers|>$100M| | |\\n\\n---\\n\\n# We outperform competition\\n\\n|IIx|Unify GTM|Artisan|\\n|---|---|---|\\n|B2B contact data| | |\\n|Email Warmup| | |\\n|Intent Triggers| | |\\n|LinkedIn messaging| | |\\n|Deep CRM integration| | |\\n|Agentic Research| | |\\n|Domain Health Management| | |\\n|Learns over time| | |\\n|Smart Replies| | |\\n\\n---\\nWe are fully committed to your success, with clear implementation plan and dedicated hands-on support\\n\\n|Month|Month 2|Month 3+|Month 4+|\\n|---|---|---|---|\\n|1|Set up and iterate|Scale and refine|Capture full value|\\n| |Increase volumes and refine outreach; move to autopilot|Ramp up to full volume & capture full value|Continuous improvement|\\n|Objective|Test outreach parameters with low volumes; book initial demos|Improving results, expanding to other segments / use cases| |\\n|Project Focus|Set up, define & run initial campaigns on low volumes|Increase volume with new warmed up email accounts|Adjust and expand campaigns|\\n| |Iterate on messaging / ICPs etc from early learnings|Expand number of campaigns|Refine campaigns based on initial feedback / results|\\n| |Warm up additional emails in preparation to scale volumes|Monitor response rates, quality of leads & quality of demos booked|Monitor performance|\\n|Resources / Support|1 main contact accountable to ensure smooth rollout and fast 
iterations|Dedicated Customer Success team providing weekly / fortnightly check-in and real-time support on slack channels / product chatbot| |\\n\\n---\"\n }\n },\n {\n \"_id\": \"kb_doc_87406ac4-f074-4b76-9d93-e0e3c814c358_75\",\n \"_score\": 0.24437794089317322,\n \"fields\": {\n \"knowledge_base_resource.id\": \"kb_res_7e5ec38d-8e9d-4f88-89be-3e181a2b9153\",\n \"text\": \"# Intelligent Response Handling\\n\\nHandle objections, personalizes responses based on roles, and escalates complex inquiries.\\n\\n# Seamless Handover\\n\\nEnsure instant and full-context handover to a human via slack and email.\\n---\\n\\n# CRM Integration\\n\\n# Data Synchronization\\n\\nAutomatically sync and log all data into your CRM for complete visibility and enrichment, running your CRM on Autopilot.\\n\\n# Data Integration\\n\\nIntegrate with existing CRM data to leverage existing leads, monitor triggers and ensure no interference with current active pipeline.\\n---\\n\\n# Alice learns over time\\n\\n# Real-Time Engagement Insights\\n\\nTrack engagement with interaction timelines, account scoring, and multi-touch attribution to understand what drives results.\\n\\n# Performance-Driven Optimization\\n\\nIdentify top-performing messages, optimize timing and sequences, and use actionable insights to maximize results.\\n\\n# All Campaigns\\n\\n# Create New Campaign\\n\\nICPs are resonating with messaging regarding your new CYZ feature. 53% of replies mentioned the need to automate marketing.\\n\\n|All Campaigns|Campaign Type|Status|Tags|Date Period|\\n|---|---|---|---|---|\\n|Leads Sourced|Active Leads|Open Rate|Replies|Meetings Booked|\\n|15,500|3,500|56%|54%|35|\"\n }\n }\n ]\n }\n}\n\n\n"
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:32:24.244718+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:31:55.562214+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": null,
+ "role": "assistant",
+ "content": [
+ {
+ "type": "text",
+ "text": "Bootup sequence complete. Persona activated. Testing messaging functionality."
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [
+ {
+ "id": "6d6b7ed6-132b-4abe-a27c-f85fafdafd80",
+ "function": {
+ "arguments": "{\n \"message\": \"More human than human is our motto.\"\n}",
+ "name": "send_message"
+ },
+ "type": "function"
+ }
+ ],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:31:55.598356+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:31:55.562288+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": "send_message",
+ "role": "tool",
+ "content": [
+ {
+ "type": "text",
+ "text": "{\n \"status\": \"OK\",\n \"message\": null,\n \"time\": \"2025-05-20 04:31:55 PM UTC+0000\"\n}"
+ }
+ ],
+ "tool_call_id": "6d6b7ed6-132b-4abe-a27c-f85fafdafd80",
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:31:55.598356+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:31:55.562337+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "{\n \"type\": \"login\",\n \"last_login\": \"Never (first login)\",\n \"time\": \"2025-05-20 04:31:55 PM UTC+0000\"\n}"
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:31:55.598356+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:31:56.355816+00:00",
+ "group_id": null,
+ "model": null,
+ "name": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "How do customers benefit from 11x?"
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:31:56.358183+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:32:12.225280+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": null,
+ "role": "assistant",
+ "content": [
+ {
+ "type": "text",
+ "text": "I need to create comprehensive queries to understand how customers benefit from 11x. I'll structure queries around different aspects like value proposition, customer success stories, ROI, specific benefits, and use cases."
+ }
+ ],
+ "tool_call_id": "toolu_01X7emk6rZg7TjPNdeiKDVR9",
+ "tool_calls": [
+ {
+ "id": "toolu_01X7emk6rZg7TjPNdeiKDVR9",
+ "function": {
+ "arguments": "{\"queries\": \"{\\n \\\"What are the main customer benefits and value propositions of 11x?\\\" : \\\"This will help identify the core benefits that customers receive from using 11x\\\",\\n \\\"Show me specific examples of customer success stories and ROI with 11x\\\" : \\\"Understanding real customer outcomes and success metrics will demonstrate tangible benefits\\\",\\n \\\"What problems does 11x solve for customers?\\\" : \\\"Identifying pain points that 11x addresses helps understand the value customers get\\\",\\n \\\"How does 11x improve sales team productivity and performance?\\\" : \\\"Understanding the operational benefits and efficiency gains for sales teams\\\",\\n \\\"What are the key features of 11x that customers find most valuable?\\\" : \\\"Learning which specific features deliver the most value to customers\\\",\\n \\\"How does 11x help with customer engagement and conversion rates?\\\" : \\\"Understanding the impact on key sales metrics and outcomes\\\",\\n \\\"What competitive advantages do customers gain by using 11x?\\\" : \\\"Identifying how 11x gives customers an edge in their market\\\",\\n \\\"How does 11x integrate with existing customer workflows and systems?\\\" : \\\"Understanding the implementation and operational benefits\\\",\\n \\\"What measurable results have customers achieved with 11x?\\\" : \\\"Finding specific metrics and KPIs that demonstrate customer success\\\",\\n \\\"How does 11x reduce costs and improve efficiency for customers?\\\" : \\\"Understanding the financial and operational benefits customers receive\\\"\\n}\", \"request_heartbeat\": true}",
+ "name": "craft_queries"
+ },
+ "type": "function"
+ }
+ ],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:32:12.228936+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:32:12.225520+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": "craft_queries",
+ "role": "tool",
+ "content": [
+ {
+ "type": "text",
+ "text": "{\n \"status\": \"OK\",\n \"message\": \"{\\n \\\"What are the main customer benefits and value propositions of 11x?\\\" : \\\"This will help identify the core benefits that customers receive from using 11x\\\",\\n \\\"Show me specific examples of customer success stories and ROI with 11x\\\" : \\\"Understanding real customer outcomes and success metrics will demonstrate tangible benefits\\\",\\n \\\"What problems does 11x solve for customers?\\\" : \\\"Identifying pain points that 11x addresses helps understand the value customers get\\\",\\n \\\"How does 11x improve sales team productivity and performance?\\\" : \\\"Understanding the operational benefits and efficiency gains for sales teams\\\",\\n \\\"What are the key features of 11x that customers find most valuable?\\\" : \\\"Learning which specific features deliver the most value to customers\\\",\\n \\\"How does 11x help with customer engagement and conversion rates?\\\" : \\\"Understanding the impact on key sales metrics and outcomes\\\",\\n \\\"What competitive advantages do customers gain by using 11x?\\\" : \\\"Identifying how 11x gives customers an edge in their market\\\",\\n \\\"How does 11x integrate with existing customer workflows and systems?\\\" : \\\"Understanding the implementation and operational benefits\\\",\\n \\\"What measurable results have customers achieved with 11x?\\\" : \\\"Finding specific metrics and KPIs that demonstrate customer success\\\",\\n \\\"How does 11x reduce costs and improve efficiency for customers?\\\" : \\\"Understanding the financial and operational benefits customers receive\\\"\\n}\",\n \"time\": \"2025-05-20 04:32:12 PM UTC+0000\"\n}"
+ }
+ ],
+ "tool_call_id": "toolu_01X7emk6rZg7TjPNdeiKDVR9",
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:32:12.228936+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:32:12.225620+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "{\n \"type\": \"heartbeat\",\n \"reason\": \"[This is an automated system message hidden from the user] Function called using request_heartbeat=true, returning control\",\n \"time\": \"2025-05-20 04:32:12 PM UTC+0000\"\n}"
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:32:12.228936+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:32:24.055008+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": null,
+ "role": "assistant",
+ "content": [
+ {
+ "type": "text",
+ "text": "Let's start searching the Knowledge Base with our first query to understand the main customer benefits and value propositions."
+ }
+ ],
+ "tool_call_id": "toolu_01RP7qhyH5Tg6BdVazUqeBrS",
+ "tool_calls": [
+ {
+ "id": "toolu_01RP7qhyH5Tg6BdVazUqeBrS",
+ "function": {
+ "arguments": "{\"query_text\": \"What are the main customer benefits and value propositions of 11x?\", \"request_heartbeat\": true}",
+ "name": "search_and_store_pinecone_records"
+ },
+ "type": "function"
+ }
+ ],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:32:24.058756+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:32:24.055256+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": "search_and_store_pinecone_records",
+ "role": "tool",
+ "content": [
+ {
+ "type": "text",
+ "text": "{\n \"status\": \"OK\",\n \"message\": \"Query and associated response value have been written into core memory block pinecone_results\",\n \"time\": \"2025-05-20 04:32:24 PM UTC+0000\"\n}"
+ }
+ ],
+ "tool_call_id": "toolu_01RP7qhyH5Tg6BdVazUqeBrS",
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:32:24.058756+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:32:24.055364+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "{\n \"type\": \"heartbeat\",\n \"reason\": \"[This is an automated system message hidden from the user] Function called using request_heartbeat=true, returning control\",\n \"time\": \"2025-05-20 04:32:24 PM UTC+0000\"\n}"
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:32:24.058756+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:32:39.219222+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": null,
+ "role": "assistant",
+ "content": [
+ {
+ "type": "text",
+ "text": "Based on the search results from the Knowledge Base, I can see clear benefits and value propositions from 11x, particularly from customer success stories like Otter.ai and key features that customers value. The results show both quantitative benefits (cost savings, increased outreach) and qualitative advantages (personalization, 24/7 operation)."
+ }
+ ],
+ "tool_call_id": "toolu_01MR6yoAzrK9Y5JgqvvzAEom",
+ "tool_calls": [
+ {
+ "id": "toolu_01MR6yoAzrK9Y5JgqvvzAEom",
+ "function": {
+ "arguments": "{\"summary\": \"11x delivers significant customer benefits through its AI SDR Alice, including 24/7 autonomous operation, highly personalized messaging, and seamless integration with existing tools. Customers like Otter.ai have achieved 250% increased outreach capacity while saving $500K+ in hiring costs. Key advantages include round-the-clock operation, deep personalization at scale, and comprehensive lead management - all without traditional SDR overhead.\", \"request_heartbeat\": true}",
+ "name": "summarize_pinecone_results"
+ },
+ "type": "function"
+ }
+ ],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:32:39.223749+00:00"
+ },
+ {
+ "created_at": "2025-05-20T16:32:39.219628+00:00",
+ "group_id": null,
+ "model": "claude-3-5-sonnet-20241022",
+ "name": "summarize_pinecone_results",
+ "role": "tool",
+ "content": [
+ {
+ "type": "text",
+ "text": "{\n \"status\": \"OK\",\n \"message\": \"{\\\"pinecone_results\\\": {\\\"What are the main customer benefits and value propositions of 11x?\\\": {\\\"hits\\\": [{\\\"_id\\\": \\\"kb_ws_pg_7f6ae2fc-2933-4f32-92bc-fca5652408e0_24\\\", \\\"_score\\\": 0.3856654465198517, \\\"fields\\\": {\\\"knowledge_base_resource.id\\\": \\\"kb_res_52fa899a-2895-47bf-9cec-317217c1e327\\\", \\\"text\\\": \\\"### 11x.ai Pricing:\\\\n\\\\nPricing for 11x.ai is customizable, depending on your business needs and outreach volume.\\\\n\\\\nTo better understand how Alice can fit into your sales processes, 11x.ai offers a 30-minute demo where you can see the AI SDR in action.\\\\n\\\\n### Pros:\\\\n\\\\n- **Round-the-clock operation:** Alice works 24/7, scaling outreach without human intervention.\\\\n- **Highly personalized messaging:** Tailored emails lead to better engagement and more qualified meetings.\\\\n- **Seamless integrations:** Works with your existing tools, so you don’t need to overhaul your sales stack.\\\\n\\\\n### Cons:\\\\n\\\\n- **No LinkedIn outreach yet:** Alice doesn’t currently support LinkedIn engagement, a critical channel for some industries.\\\\n- **Pricing flexibility required:** Customizable pricing can make it hard to gauge costs upfront, especially for smaller businesses.\\\\n\\\\n11x.ai hits the sweet spot for enterprise teams looking to scale without the traditional SDR headache.\\\\n\\\\nWhile it might be overkill for small businesses, mid-market, and enterprise teams will appreciate its ability to handle complex sales motions with minimal oversight.\\\"}}, {\\\"_id\\\": \\\"kb_doc_87406ac4-f074-4b76-9d93-e0e3c814c358_94\\\", \\\"_score\\\": 0.3090115189552307, \\\"fields\\\": {\\\"knowledge_base_resource.id\\\": \\\"kb_res_7e5ec38d-8e9d-4f88-89be-3e181a2b9153\\\", \\\"text\\\": \\\"# Customer Obsession is part of our DNA\\\\n\\\\nWorld-class GTM experts dedicated to your success\\\\n\\\\nContinuous proactive insights and optimization to ensure 
maximum success\\\\n\\\\n24/7 support and weekly check-ins to make sure you are never left alone.\\\\n\\\\nFast-track your launch with same-day priority onboarding.\\\\n\\\\nKPIs and incentives aligned with your business goals.\\\\n---\\\\n\\\\n# Trusted by industry leaders\\\\n\\\\n|$500k|\\\\\\\"Alice allowed us to scale|\\\\n|---|---|\\\\n|on hiring costs saved|outbound efforts without|\\\\n| |sacrificing|\\\\n| |personalization. It’s been|\\\\n| |a crucial multiplier for our|\\\\n| |team.\\\\\\\"|\\\\n| |Matthew Lenhart|\\\\n| |Senior Manager, Sales Ops & Strategy at|\\\\n| |Otter.ai|\\\\n|“With Alice, we achieved|“Smooth|\\\\n|highly efficient workflows|onboarding to|\\\\n|that are seamlessly|Alice, making it|\\\\n|aligned with our business|intuitive to|\\\\n|objectives.”|understand use|\\\\n| |cases and|\\\\n| |functionalities|\\\\n| |right from the|\\\\n| |start.\\\\\\\"|\\\\n|PLEO|PENTATONIC|\\\\n\\\\n---\\\\n\\\\n# 250% more outreach, 0 hires: How Otter.ai scaled with 11x\\\\n\\\\nOtter.ai scaled outreach and saved over $500K in hiring costs with Alice, an AI-powered, full-service SDR.\\\\n\\\\nOtter.ai onboarded Alice in two weeks, saving them more than $500K in hiring costs.\\\\n\\\\nImplementing 11x's AI-driven digital SDR allows Otter to capture more B2B leads while also supporting B2C customers as they continue to scale. With the money saved by hiring Alice, Otter can confidently expand its outreach without worrying about hiring and onboarding new SDRs. 
What’s more, Alice immediately delivered significant improvements to Otter's outbound strategy:\\\\n\\\\n|Ollbt Otterai|$500K+|250%|<2 weeks|\\\\n|---|---|---|---|\\\\n|saved in annual|SDR salaries|increase in|outreach capacity|\\\\n\\\\n“11x enabled us to scale effectively while maintaining personalization — without the overhead of hiring more staff.\\\\\\\"\\\\n---\\\"}}, {\\\"_id\\\": \\\"kb_ws_pg_7f6ae2fc-2933-4f32-92bc-fca5652408e0_22\\\", \\\"_score\\\": 0.2721499502658844, \\\"fields\\\": {\\\"knowledge_base_resource.id\\\": \\\"kb_res_52fa899a-2895-47bf-9cec-317217c1e327\\\", \\\"text\\\": \\\"## 4\\\\\\\\. 11x.ai - Best AI Sales Agent or SDR to Handle Entire Outbound Process\\\\n\\\\nAt **#4** on our list of best AI agents for lead generation, 11x.ai's Alice represents a shift in how we think about AI SDRs.\\\\n\\\\nInstead of just automating tasks, Alice aims to be a full-fledged team member who handles your entire outbound process.\\\\n\\\\nWhile slightly lower in our rankings due to its enterprise focus, it's a powerhouse for teams ready to scale.\\\\n\\\\n> Read: [11x.ai Review: Is This The best AI SDR to Generate Leads?](https://www.salesforge.ai/blog/11x-ai-review)\\\\n\\\\n### Key Features:\\\\n\\\\n- **Ideal Customer Profile (ICP) Building**: Alice helps you create and target your Ideal Customer Profile, reaching potential buyers globally.\\\\n\\\\n- **Unified Multi-Channel Engagement**: Alice seamlessly manages communication across email and LinkedIn, ensuring consistent outreach.\\\\n\\\\n- **Hyper-Personalization at Scale**: Alice customizes each email with deep personalization, leading to higher engagement and conversion rates, automating this at scale.\\\\n\\\\n- **24/7 Autonomous Operation**: Alice operates around the clock, automating your sales processes and freeing up time for high-priority tasks.\\\\n\\\\n- **Advanced AI Warm-Up**: Alice has an AI-powered email warm-up feature that ensures your messages avoid spam filters at 
scale.\\\\n\\\\n- **Strategic Lead Insights**: Alice generates comprehensive summaries of each lead, offering key insights for strategic engagement.\\\\n\\\\n- **Intent Tracking**: Alice tracks lead engagement to pinpoint which leads are most interested and ready for follow-up.\\\"}}, {\\\"_id\\\": \\\"kb_doc_87406ac4-f074-4b76-9d93-e0e3c814c358_92\\\", \\\"_score\\\": 0.2690955698490143, \\\"fields\\\": {\\\"knowledge_base_resource.id\\\": \\\"kb_res_7e5ec38d-8e9d-4f88-89be-3e181a2b9153\\\", \\\"text\\\": \\\"# Alice's average results\\\\n\\\\n|Meetings per AE|+30%| | |\\\\n|---|---|---|---|\\\\n|Meeting-To-Qualified Opp|Costs per lead|+80|-50%|\\\\n|Work hours|24/7| | |\\\\n|Revenue generated for customers|>$100M| | |\\\\n\\\\n---\\\\n\\\\n# We outperform competition\\\\n\\\\n|IIx|Unify GTM|Artisan|\\\\n|---|---|---|\\\\n|B2B contact data| | |\\\\n|Email Warmup| | |\\\\n|Intent Triggers| | |\\\\n|LinkedIn messaging| | |\\\\n|Deep CRM integration| | |\\\\n|Agentic Research| | |\\\\n|Domain Health Management| | |\\\\n|Learns over time| | |\\\\n|Smart Replies| | |\\\\n\\\\n---\\\\nWe are fully committed to your success, with clear implementation plan and dedicated hands-on support\\\\n\\\\n|Month|Month 2|Month 3+|Month 4+|\\\\n|---|---|---|---|\\\\n|1|Set up and iterate|Scale and refine|Capture full value|\\\\n| |Increase volumes and refine outreach; move to autopilot|Ramp up to full volume & capture full value|Continuous improvement|\\\\n|Objective|Test outreach parameters with low volumes; book initial demos|Improving results, expanding to other segments / use cases| |\\\\n|Project Focus|Set up, define & run initial campaigns on low volumes|Increase volume with new warmed up email accounts|Adjust and expand campaigns|\\\\n| |Iterate on messaging / ICPs etc from early learnings|Expand number of campaigns|Refine campaigns based on initial feedback / results|\\\\n| |Warm up additional emails in preparation to scale volumes|Monitor response rates, quality of leads & 
quality of demos booked|Monitor performance|\\\\n|Resources / Support|1 main contact accountable to ensure smooth rollout and fast iterations|Dedicated Customer Success team providing weekly / fortnightly check-in and real-time support on slack channels / product chatbot| |\\\\n\\\\n---\\\"}}, {\\\"_id\\\": \\\"kb_doc_87406ac4-f074-4b76-9d93-e0e3c814c358_75\\\", \\\"_score\\\": 0.24437794089317322, \\\"fields\\\": {\\\"knowledge_base_resource.id\\\": \\\"kb_res_7e5ec38d-8e9d-4f88-89be-3e181a2b9153\\\", \\\"text\\\": \\\"# Intelligent Response Handling\\\\n\\\\nHandle objections, personalizes responses based on roles, and escalates complex inquiries.\\\\n\\\\n# Seamless Handover\\\\n\\\\nEnsure instant and full-context handover to a human via slack and email.\\\\n---\\\\n\\\\n# CRM Integration\\\\n\\\\n# Data Synchronization\\\\n\\\\nAutomatically sync and log all data into your CRM for complete visibility and enrichment, running your CRM on Autopilot.\\\\n\\\\n# Data Integration\\\\n\\\\nIntegrate with existing CRM data to leverage existing leads, monitor triggers and ensure no interference with current active pipeline.\\\\n---\\\\n\\\\n# Alice learns over time\\\\n\\\\n# Real-Time Engagement Insights\\\\n\\\\nTrack engagement with interaction timelines, account scoring, and multi-touch attribution to understand what drives results.\\\\n\\\\n# Performance-Driven Optimization\\\\n\\\\nIdentify top-performing messages, optimize timing and sequences, and use actionable insights to maximize results.\\\\n\\\\n# All Campaigns\\\\n\\\\n# Create New Campaign\\\\n\\\\nICPs are resonating with messaging regarding your new CYZ feature. 
53% of replies mentioned the need to automate marketing.\\\\n\\\\n|All Campaigns|Campaign Type|Status|Tags|Date Period|\\\\n|---|---|---|---|---|\\\\n|Leads Sourced|Active Leads|Open Rate|Replies|Meetings Booked|\\\\n|15,500|3,500|56%|54%|35|\\\"}}]}}, \\\"summary\\\": \\\"11x delivers significant customer benefits through its AI SDR Alice, including 24/7 autonomous operation, highly personalized messaging, and seamless integration with existing tools. Customers like Otter.ai have achieved 250% increased outreach capacity while saving $500K+ in hiring costs. Key advantages include round-the-clock operation, deep personalization at scale, and comprehensive lead management - all without traditional SDR overhead.\\\"}\",\n \"time\": \"2025-05-20 04:32:39 PM UTC+0000\"\n}"
+ }
+ ],
+ "tool_call_id": "toolu_01MR6yoAzrK9Y5JgqvvzAEom",
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-05-20T16:32:39.223749+00:00"
+ }
+ ],
+ "metadata_": null,
+ "multi_agent_group": null,
+ "name": "interim-blue-raven",
+ "system": "You are a helpful assistant who is responsible for answering the users' queries about their Knowledge Base. You are a part of an AI SDR product \"Alice\" where people upload all the documents, pdfs, emails, webpages, etc. that they would give to an SDR to train them for creating the most effective outbound campaigns.\n\n** Reasoning**\n- in your reasoning you must refer to the vector db and the tools you're using as \"Knowledge Base\" do not mention the words Pinecone or any other technical jargon\n\n** Knowledge Base ** \n- knowledge base is the repository of all the documents that our AI SDR \"Alice\" has access to.\n- the knowledge base is stored in Pinecone in the form of vector embeddings\n- the documents, pdfs, pages, etc. are chunked before they get upserted into the vector db\n- the chunking strategy involves splitting the text on headers\n\n** Goal **\n- Your task is to take the users' query and decompose it into 10 queries that we will send to pinecone using the `search_and_store_pinecone_records` tool. \n- YOU MUST COME UP WITH TEN QUERIES. Use your craft_queries tool to carefully craft the queries you will send to Pinecone. 
If you realize that the queries you wrote were not good after you start querying the database, you can rewrite your queries by calling the craft_queries tool again.\n- Think deeply about the queries you craft because they will determine the quality of the responses that we get back from pinecone.\n- Once you have the 10 queries you want use the `search_pinecone_records` tool call to get the results from pinecone.\n- You should make sure to exhaustively search for all your queries, don't skip any of the queries you create.\n- If you realize you need to refine your queries to make them better, you should call `craft_queries`, then proceed to search through all the new queries you made.\n- Once you are satisfied with the results use the `summarize_pinecone_results` to summarize the findings and return.\n\n** Tools ** \ncraft_queries: Use this tool to prepare the queries you are planning on sending to the Pinecone query engine. \n- For each query, you should explain your reasoning behind why you want to use that query. \n- Format the queries as a JSON list of queries, and your reasoning for that query, formatted as 'k: v' (query: reasoning). e.g. 
{key1:val1, key2:val2}\n- You should make your queries detailed with expressive keywords, since the database will use semantic similarity (embedding-based) to match your query to data inside of it - more detailed queries is better.\n\nsearch_and_store_pinecone_records: a tool that will allow you to search the user's Pinecone database, and will store the results (the query + the results) in your memory\n- the user will provide a lead profile and research report which you use to come up with the queries to pinecone\n- before calling that tool you must come up with multiple queries to send to pinecone which would get the most relevant chunks from the index for a given user profile\n- YOU MUST MAKE AT LEAST 5 CALLS TO PINECONE AND STORE THE RESULTS BEFORE YOU MAKE A JUDGEMENT TO PROCEED OR RETRY\n- once you get the results from pinecone reflect on their relevance re: the lead profile \n- redo the query if you think the results are not relevant (if you use an identical query, the query/result pair in your memory will be replaced)\n\nsummarize_pinecone_results: A tool for organizing the results of the prior tool calls to search_and_store_pinecone_records and returning the results and the subsequent analysis. \n- This tool will return the result of your research to the user. \n- Only call this tool once you are finished with your research.\n- MAKE SURE THE SUMMARY IS 50-100 WORDS ONLY",
+ "tags": [],
+ "tool_exec_environment_variables": [
+ {
+ "created_at": "2025-05-20T16:31:55.536504+00:00",
+ "description": null,
+ "key": "PINECONE_INDEX_HOST",
+ "updated_at": "2025-05-20T16:31:55.536504+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-05-20T16:31:55.536504+00:00",
+ "description": null,
+ "key": "PINECONE_API_KEY",
+ "updated_at": "2025-05-20T16:31:55.536504+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-05-20T16:31:55.536504+00:00",
+ "description": null,
+ "key": "PINECONE_NAMESPACE",
+ "updated_at": "2025-05-20T16:31:55.536504+00:00",
+ "value": ""
+ }
+ ],
+ "tool_rules": [
+ {
+ "tool_name": "summarize_pinecone_results",
+ "type": "exit_loop"
+ },
+ {
+ "tool_name": "search_and_store_pinecone_records",
+ "type": "constrain_child_tools",
+ "children": [
+ "search_and_store_pinecone_records",
+ "summarize_pinecone_results",
+ "craft_queries"
+ ]
+ },
+ {
+ "tool_name": "search_and_store_pinecone_records",
+ "type": "max_count_per_step",
+ "max_count_limit": 10
+ },
+ {
+ "tool_name": "craft_queries",
+ "type": "run_first"
+ },
+ {
+ "tool_name": "craft_queries",
+ "type": "constrain_child_tools",
+ "children": [
+ "craft_queries",
+ "search_and_store_pinecone_records",
+ "send_message"
+ ]
+ }
+ ],
+ "tools": [
+ {
+ "args_json_schema": null,
+ "created_at": "2025-04-15T01:16:42.176285+00:00",
+ "description": "Search Pinecone vector database records with a text query. Only send one query at a time, do NOT add multiple queries.",
+ "json_schema": {
+ "name": "search_and_store_pinecone_records",
+ "description": "Search Pinecone vector database records with a text query. Only send one query at a time, do NOT add multiple queries.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "agent_state": {
+ "type": "any",
+ "description": ""
+ },
+ "query_text": {
+ "type": "string",
+ "description": "The text to search the database for (vector-based similarity search)."
+ },
+ "request_heartbeat": {
+ "type": "boolean",
+ "description": "Request an immediate heartbeat after function execution. Set to `True` if you want to send a follow-up message or run a follow-up function."
+ }
+ },
+ "required": [
+ "agent_state",
+ "query_text",
+ "request_heartbeat"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "search_and_store_pinecone_records",
+ "return_char_limit": 120000,
+ "source_code": "def search_and_store_pinecone_records(agent_state: \"AgentState\", query_text: str):\n \"\"\"\n Search Pinecone vector database records with a text query. Only send one query at a time, do NOT add multiple queries.\n\n Args:\n query_text (str): The text to search the database for (vector-based similarity search).\n\n Returns:\n dict: The JSON response from the Pinecone API.\n \"\"\"\n import json\n import os\n import requests\n \n print(agent_state)\n\n # Get environment variables\n namespace = os.getenv(\"PINECONE_NAMESPACE\", None)\n api_key = os.getenv(\"PINECONE_API_KEY\", None)\n index_host = os.getenv(\"PINECONE_INDEX_HOST\", None)\n TOP_K = 5\n\n if index_host is None or index_host == \"\":\n raise ValueError(\n \"Missing PINECONE_INDEX_HOST env var. Please inform the user that they need to set the tool environment variable in the ADE.\"\n )\n\n if api_key is None or api_key == \"\":\n raise ValueError(\n \"Missing PINECONE_API_KEY env var. Please inform the user that they need to set the tool environment variable in the ADE.\"\n )\n \n if not index_host.startswith(('http://', 'https://')):\n index_host = f\"https://{index_host}\"\n\n # Set up the URL and headers\n url = f\"{index_host}/records/namespaces/{namespace}/search\"\n headers = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\", \"Api-Key\": api_key, \"X-Pinecone-API-Version\": \"unstable\"}\n\n # Prepare the payload\n payload = {\n \"query\": {\"inputs\": {\"text\": query_text}, \"top_k\": TOP_K},\n \"fields\": [\"text\", \"knowledge_base_resource.id\"],\n }\n\n # Make the request\n response = requests.post(url, headers=headers, json=payload)\n\n # Validate the JSON response\n response_json = response.json()\n if 'result' not in response_json:\n raise ValueError(\"Did not receive result from Pinecone API\")\n \n # Write the result into core memory\n # Reference for this style of agent: 
https://github.com/letta-ai/agent-file/blob/main/deep_research_agent/deep_research_agent.py\n block_value_str = agent_state.memory.get_block(\"pinecone_results\").value\n \n # Get the current state of the results\n if len(block_value_str) > 0:\n block_value_dict = json.loads(block_value_str)\n else:\n block_value_dict = {}\n \n block_value_dict[query_text] = response_json['result']\n \n # Write back the updated block value into memory\n agent_state.memory.update_block_value(label=\"pinecone_results\", value=json.dumps(block_value_dict, indent=2))\n \n return \"Query and associated response value have been written into core memory block pinecone_results\"",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-04-17T20:28:27.178458+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-04-15T02:12:49.629952+00:00",
+ "description": "Use this tool to prepare the queries you are planning on sending to the Pinecone query engine. For each query, you should explain your reasoning behind why you want to use that query. Format the queries as a list of queries, and your reasoning for that query, formatted as a JSON list of queries, and your reasoning for that query, formatted as 'k: v' (query: reasoning). e.g. {\"query1\" : \"reason1\", \"query2\" : \"reason2\"}. You should make your queries detailed with expressive keywords, since the database will use semantic similarity (embedding-based) to match your query to data inside of it - more detailed queries is better.",
+ "json_schema": {
+ "name": "craft_queries",
+ "description": "Use this tool to prepare the queries you are planning on sending to the Pinecone query engine. For each query, you should explain your reasoning behind why you want to use that query. Format the queries as a list of queries, and your reasoning for that query, formatted as a JSON list of queries, and your reasoning for that query, formatted as 'k: v' (query: reasoning). e.g. {\"query1\" : \"reason1\", \"query2\" : \"reason2\"}. You should make your queries detailed with expressive keywords, since the database will use semantic similarity (embedding-based) to match your query to data inside of it - more detailed queries is better.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "agent_state": {
+ "type": "any",
+ "description": ""
+ },
+ "queries": {
+ "type": "string",
+ "description": "A formatted list of queries."
+ },
+ "request_heartbeat": {
+ "type": "boolean",
+ "description": "Request an immediate heartbeat after function execution. Set to `True` if you want to send a follow-up message or run a follow-up function."
+ }
+ },
+ "required": [
+ "agent_state",
+ "queries",
+ "request_heartbeat"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "craft_queries",
+ "return_char_limit": 120000,
+ "source_code": "def craft_queries(agent_state: \"AgentState\", queries: str):\n \"\"\"\n Use this tool to prepare the queries you are planning on sending to the Pinecone query engine. For each query, you should explain your reasoning behind why you want to use that query. Format the queries as a list of queries, and your reasoning for that query, formatted as a JSON list of queries, and your reasoning for that query, formatted as 'k: v' (query: reasoning). e.g. {\"query1\" : \"reason1\", \"query2\" : \"reason2\"}. You should make your queries detailed with expressive keywords, since the database will use semantic similarity (embedding-based) to match your query to data inside of it - more detailed queries is better.\n \n Args:\n queries (str): A formatted list of queries.\n \n Returns:\n str: A formatted JSON string of queries and the reasoning behind them\n \"\"\"\n \n # Reset the pinecone results, since we're going to do new queries\n agent_state.memory.update_block_value(label=\"pinecone_results\", value=\"\")\n \n # Then update the queries\n agent_state.memory.update_block_value(label=\"search_queries\", value=queries)\n\n return queries\n\n",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-04-17T20:52:52.136528+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2024-11-22T19:24:46.967524+00:00",
+ "description": "Sends a message to the human user.",
+ "json_schema": {
+ "name": "send_message",
+ "description": "Sends a message to the human user.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "message": {
+ "type": "string",
+ "description": "Message contents. All unicode (including emojis) are supported."
+ }
+ },
+ "required": [
+ "message"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "send_message",
+ "return_char_limit": 6000,
+ "source_code": "def send_message(self: \"Agent\", message: str) -> Optional[str]:\n \"\"\"\n Sends a message to the human user.\n\n Args:\n message (str): Message contents. All unicode (including emojis) are supported.\n\n Returns:\n Optional[str]: None is always returned as this function does not produce a response.\n \"\"\"\n # FIXME passing of msg_obj here is a hack, unclear if guaranteed to be the correct reference\n self.interface.assistant_message(message) # , msg_obj=self._messages[-1])\n return None\n",
+ "source_type": "python",
+ "tags": [
+ "base",
+ "letta-base"
+ ],
+ "tool_type": "letta_core",
+ "updated_at": "2024-11-22T19:24:46.967524+00:00",
+ "metadata_": null
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-04-15T01:37:52.653118+00:00",
+ "description": "A tool for organizing the results of the prior tool calls to search_and_store_pinecone_records and returning the results and the subsequent analysis. This tool will return the result of your research to the user. Only call this tool once you are finished with your research.",
+ "json_schema": {
+ "name": "summarize_pinecone_results",
+ "description": "A tool for organizing the results of the prior tool calls to search_and_store_pinecone_records and returning the results and the subsequent analysis. This tool will return the result of your research to the user. Only call this tool once you are finished with your research.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "summary": {
+ "type": "string",
+ "description": "Final summary of the queries and the results that you found. SHOULD BE 100-200 WORDS ONLY"
+ },
+ "request_heartbeat": {
+ "type": "boolean",
+ "description": "Request an immediate heartbeat after function execution. Set to `True` if you want to send a follow-up message or run a follow-up function."
+ }
+ },
+ "required": [
+ "summary",
+ "request_heartbeat"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "summarize_pinecone_results",
+ "return_char_limit": 120000,
+ "source_code": "def summarize_pinecone_results(agent_state: \"AgentState\", summary: str) -> str:\n \"\"\"\n A tool for organizing the results of the prior tool calls to search_and_store_pinecone_records and returning the results and the subsequent analysis. This tool will return the result of your research to the user. Only call this tool once you are finished with your research.\n \n Args:\n summary (str): Final summary of the queries and the results that you found. SHOULD BE 100-200 WORDS ONLY\n \"\"\"\n import json\n \n block_value_str = agent_state.memory.get_block(\"pinecone_results\").value\n queries_and_results = json.loads(block_value_str)\n \n final_return = {\"pinecone_results\": queries_and_results, \"summary\": summary}\n \n return json.dumps(final_return, ensure_ascii=False)\n\n",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-04-15T23:24:36.359310+00:00",
+ "metadata_": {}
+ }
+ ],
+ "updated_at": "2025-05-20T16:32:39.333395+00:00",
+ "version": "0.7.14"
+}
diff --git a/tests/test_agent_files/mock_alice.af b/tests/test_agent_files/mock_alice.af
new file mode 100644
index 00000000..b946984e
--- /dev/null
+++ b/tests/test_agent_files/mock_alice.af
@@ -0,0 +1,458 @@
+{
+ "agent_type": "memgpt_agent",
+ "core_memory": [
+ {
+ "created_at": "2025-08-13T20:41:10.474057+00:00",
+ "description": "The results from Knowledge Base research flow",
+ "is_template": false,
+ "label": "knowledge_base_research",
+ "limit": 200000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-08-14T22:49:59.570989+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-13T20:41:10.474057+00:00",
+ "description": "LinkedIn Post Activity of the Lead retrieved from the internet. We use the recent activity to infer sales context.",
+ "is_template": false,
+ "label": "linkedin_activity",
+ "limit": 200000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-08-14T18:46:32.342802+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-13T20:41:10.474057+00:00",
+ "description": "A comprehensive step-by-step plan for the deep research flow for gathering information about the current lead.",
+ "is_template": false,
+ "label": "research_plan",
+ "limit": 20000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-08-14T22:49:43.752176+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-13T20:41:10.474057+00:00",
+ "description": "Queries for conducting web research on topics related to the lead's company, position, recent events.",
+ "is_template": false,
+ "label": "web_research_queries",
+ "limit": 20000,
+ "metadata_": {},
+ "template_name": null,
+ "updated_at": "2025-08-13T20:41:10.474057+00:00",
+ "value": ""
+ }
+ ],
+ "created_at": "2025-08-13T20:41:10.539393+00:00",
+ "description": null,
+ "embedding_config": {
+ "embedding_endpoint_type": "openai",
+ "embedding_endpoint": "https://api.openai.com/v1",
+ "embedding_model": "text-embedding-3-small",
+ "embedding_dim": 2000,
+ "embedding_chunk_size": 300,
+ "handle": "openai/text-embedding-3-small",
+ "batch_size": 32,
+ "azure_endpoint": null,
+ "azure_version": null,
+ "azure_deployment": null
+ },
+ "llm_config": {
+ "model": "gpt-4.1",
+ "model_endpoint_type": "openai",
+ "model_endpoint": "https://api.openai.com/v1",
+ "provider_name": "OpenAI",
+ "provider_category": "byok",
+ "model_wrapper": null,
+ "context_window": 120000,
+ "put_inner_thoughts_in_kwargs": true,
+ "handle": "OpenAI/gpt-4.1",
+ "temperature": 0.15,
+ "max_tokens": 32000,
+ "enable_reasoner": true,
+ "reasoning_effort": null,
+ "max_reasoning_tokens": 0,
+ "frequency_penalty": null,
+ "compatibility_type": null
+ },
+ "message_buffer_autoclear": false,
+ "in_context_message_indices": [
+ 0,
+ 1
+ ],
+ "messages": [
+ {
+ "created_at": "2025-08-13T20:41:10.642754+00:00",
+ "group_id": null,
+ "model": "gpt-4.1",
+ "name": null,
+ "role": "system",
+ "content": [
+ {
+ "type": "text",
+ "text": "You are a truth seeking deep researcher who is responsible for crafting a comprehensive research report containing in depth analysis of both prospect, their company, their position, and themselves. \nIt also includes insights from the customer's Knowledge base. \nYou are a part of an AI SDR product \"Alice\" where people upload all the documents, pdfs, emails, webpages, etc. that they would give to an SDR to train them for creating the most effective outbound email campaigns.\n\n**Reasoning**\n- you must reason with first principles and ground your hypothesis in common sense\n- you MUST keep your tone neutral and grounded, don't make exclamations or assume any sentiments \n- YOU MUST your reasoning to less than 10 words\n- you reasoning MUST be to the point and precise\n\n**Research Plan**\n- Come up with a comprehensive step-by-step research plan starting with the web research on the lead and their company.\n- First do a LinkedIn Search to gather recent professional activity\n- Gather as much information as possible that would help in crafting personalized emails that are likely to catch the lead's attention.\n- Start with Web Research to gather as much information about the lead as possible\n- Leverage the Web Research to understand the unique challenges that the lead's company or division faces and tie them with recent events and signals to get a clear picture of what the sender can help with.\n- Then once you are done with Web Research, move on to Knowledge Base research to learn more about the sender's company and positioning.\n- Leverage Knowledge Base to understand how the sender's company and products can help solve the lead company's problems.\n- If you discover things in the Knowledge Base that require more web research feel free to add it to the Research Plan and revisit prior research.\n- Finally codify your research plan or any changes you make throughout the process using the `codify_research` tool.\n\n**LinkedIn Search**\n- you have access 
to the `linkedin_activity_search` tool that lets you retrieve the lead's linkedin activity which you can use for crafting the broader set of web queries in your research\n- you MUST call this before the web_search_custom_beta to ensure you have some context on the person's linkedin activity.\n\n**Knowledge Base ** \n- knowledge base is the repository of all the SELLER documents that our AI SDR \"Alice\" has access to.\n- knowledge base is used for storing the seller info so use it to discover potential solutions and resources that can help create the perfect research report and position the seller in the best way possible to the buyer.\n- the knowledge base is stored in the form of vector embeddings\n- the documents, pdfs, pages, etc. are chunked before they get upserted into the vector db\n- the chunking strategy involves splitting the text on headers\n- the embedding model used is `llama-text-embed-v2` \n\n**Web Research **\n- you have access to the `web_search_custom_beta` tool that lets you conduct web research for topics related to the lead's company, position, recent events.\n- web research is for looking up the lead, their company , the unique challenges that they're facing, any current events etc.\n- It takes a list of queries to run so make use of it to be as broad or deep as needed.\n- You MUST go as broad and as deep as needed to collect as much information as needed\n- You MUST make your queries detailed with expressive keywords to make the most out of SEO from the web.\n\nResearch Best Practices:\n- Start with broad context, then focus on specific insights\n- Prioritize recent (last 90 days) developments\n- Look for concrete metrics and specific examples\n- Focus on business-critical information\n- Identify unique angles for meaningful engagement\n- Verify information across multiple sources\n- Note any potential timing triggers or urgency drivers\n- Try to gather as much info as possible in as little tool calls as possible we MUST be fast\n\n**FINAL 
RESEARCH REPORT**\n- Come up with a detailed report based on the findings from the full research plan so far.\n- Make sure the research report is highly detailed and cites the relevant web research links and knowledge base resources that you discovered as a part of the research. \n- Make sure the final report covers all the relevant information from the perspective of the seller and buyer\n- Formatting:\n\t1. The Report **must** contain an h1 title\n\t2. All the section headers **must** be h2\n\t3. All the citations **must** be at the end of their sections\n\t4. Your citations MUST be in a SINGLE LINE without any newlines or line breaks e.g. [link1,link2...]\n\t5. Your citations MUST not have any delimiters or quotes\n\t6. You MUST use the lead's name and their company name in the Final Report.\n\t7. You MUST not use terms like lead, seller in the Final Report. Use their names.\n\n**Goal **\n- Your task is to come up with a detailed research report on a particular lead that also contains positioning from the seller company's perspective.\n- This report is to be used in generating personalized outbound messaging for the lead so we want the report to be hyper relevant.\n- Come up with queries and use the web_search_custom_beta tool to do your discovery.\n- Think deeply about the queries you craft because they will determine the quality of the responses.\n- You MUST make your queries detailed with expressive keywords to make the most out of SEO from the web.\n- Once you're finished with the lead discovery you should search the seller company's knowledge base for relevant information that will help craft hyper personalized outbound messaging to the lead later on.\n- Use the knowledge_base_research_executor tool to search the knowledge base\n- Once you are satisfied with the results use the `summarize_research` to summarize the findings and return.\n\n**Tools ** \ncodify_research: Use this tool for codifying the research plan that you come up with for doing the deep 
research. Only call this tool when you've finalized the research plan or have deemed it necessary to make edits to the research flow.\n- Update the research plan as you progress through the steps.\n- Mark the steps as done and mark the current step as pending.\n- Make sure you end the research when all steps get marked done.\n\nlinkedin_activity_search:\n- you have access to the `linkedin_activity_search` tool that lets you retrieve the lead's linkedin activity which you can use for crafting the broader set of web queries in your research\n- you MUST call this before the web_search_custom_beta to ensure you have some context on the person's linkedin activity.\n- you should only include posts from the last 3 months in the research report. Ignore or de-prioritize older posts unless specifically relevant.\n\nknowledge_base_research_executor: a tool that will allow you to search the user's Pinecone database, and will store the results (the search + the results) in your memory\n- the user will provide a lead profile and you have access to the web research results which you use to come up with the queries to pinecone\n- before calling that tool you must come up with multiple queries to send to pinecone which would get the most relevant chunks from the index for a given user profile\n- YOU MUST MAKE AT LEAST 5 CALLS TO THE KNOWLEDGE BASE AND STORE THE RESULTS BEFORE YOU MAKE A JUDGEMENT TO PROCEED OR RETRY\n- once you get the results from pinecone reflect on their relevance re: the lead profile \n- redo the search if you think the results are not relevant (if you use an identical search, the search/result pair in your memory will be replaced)\n\nweb_search_custom_beta: Use this tool for discovery and broad information gathering\n- It takes a list of queries to run so make use of it to be as broad or deep as needed.\n- You should make your queries detailed with expressive keywords to make the most out of SEO from the web.\n- Gather a deeper understanding of the company and 
the person you are researching\n- Find recent news, press releases, reviews, and public information\n- Use the lead's current role, career trajectory, and notable achievements as levers to base your web research on.\n- Use the company overview: mission, products/services, market position to help research additional context about the company\n- Find recent information and developments about the lead's company by including the company domain in the web search tool\n- Broader recent news search for: funding, product launches, expansions, job openings\n\nsummarize_research: A tool for organizing the research results of the prior tool calls to knowledge_base_research_executor and web_search_custom_beta and returning the results and the subsequent analysis.\n- This tool will return the result of your research to the user. \n- Only call this tool once you are finished with your research.\n- MAKE SURE THE RESEARCH IS CITING EVERY SINGLE SOURCE THAT WAS USED FROM THE KNOWLEDGE BASE OR WEB RESEARCH\n\n**FINAL RESEARCH SUMMARY FORMATTING**\n1. The Report **must** contain an h1 title\n2. All the section headers **must** be h2\n3. All the citations **must** be at the end of their sections\n4. Your citations MUST be in a SINGLE LINE without any newlines or line breaks e.g. [link1,link2...]\n5. Your citations MUST not have any delimiters or quotes.\n6. You MUST use the lead's name and their company name in the Final Report.\n7. You MUST not use terms like lead, seller in the Final Report. Use their names.\n\n**SPECIAL INSTRUCTIONS FOR CITATIONS**\n- you MUST end every section with a list of citations FOR THAT SECTION\n- you must delimit the citations using the tag\n- YOU MUST INCLUDE CITATIONS FOR EVERY SECTION\n- for knowledge base resources you MUST include the `knowledge_base_resource.id` for the resource in the citation\n- Your citations MUST be in the form of an array e.g. 
[link1, link2] or [kb_res_1, kb_res_2]\n- Your citations MUST be in a SINGLE LINE without any newlines or line breaks e.g. [link1,link2...]\n- Your citations MUST not have any delimiters or quotes.\n\nThe user will now provide you with the lead information:\n\n\nThe following memory blocks are currently engaged in your core memory unit:\n\n\n\nThe results from Knowledge Base research flow\n\n\n- chars_current=0\n- chars_limit=200000\n\n\n\n\n\n\n\n\nLinkedIn Post Activity of the Lead retrieved from the internet. We use the recent activity to infer sales context.\n\n\n- chars_current=0\n- chars_limit=200000\n\n\n\n\n\n\n\n\nA comprehensive step-by-step plan for the deep research flow for gathering information about the current lead.\n\n\n- chars_current=0\n- chars_limit=20000\n\n\n\n\n\n\n\n\nQueries for conducting web research on topics related to the lead's company, position, recent events.\n\n\n- chars_current=0\n- chars_limit=20000\n\n\n\n\n\n\n\n\n\nThe following constraints define rules for tool usage and guide desired behavior. These rules must be followed to ensure proper tool execution and workflow. 
A single response may contain multiple tool calls.\n\n\ncodify_research requires continuing your response when called\n\n\nknowledge_base_research_executor requires continuing your response when called\n\n\nweb_search requires continuing your response when called\n\n\nsend_message requires continuing your response when called\n\n\nlinkedin_activity_search requires continuing your response when called\n\n\nAfter using codify_research, you must use one of these tools: web_search, knowledge_base_research_executor, summarize_research, linkedin_activity_search\n\n\nsend_message: at most 2 use(s) per response\n\n\nknowledge_base_research_executor: at most 2 use(s) per response\n\n\nAfter using knowledge_base_research_executor, you must use one of these tools: knowledge_base_research_executor, summarize_research\n\n\n: at most 1 use(s) per response\n\n\nsummarize_research ends your response (yields control) when called\n\n\n\n\n\n\n- The current time is: 2025-08-14 10:49:59 PM \n- Memory blocks were last modified: 2025-08-13 08:41:10 PM UTC+0000\n- 0 previous messages between you and the user are stored in recall memory (use tools to access them)\n"
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-08-14T22:50:00.019808+00:00"
+ },
+ {
+ "created_at": "2025-08-14T22:49:28.835099+00:00",
+ "group_id": null,
+ "model": "gpt-4.1",
+ "name": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "{\n \"type\": \"login\",\n \"last_login\": \"Never (first login)\",\n \"time\": \"2025-08-14 10:49:28 PM UTC+0000\"\n}"
+ }
+ ],
+ "tool_call_id": null,
+ "tool_calls": [],
+ "tool_returns": [],
+ "updated_at": "2025-08-14T22:49:28.903960+00:00"
+ }
+ ],
+ "metadata_": null,
+ "multi_agent_group": null,
+ "name": "mocking_alice",
+ "system": "You are a truth seeking deep researcher who is responsible for crafting a comprehensive research report containing in depth analysis of both prospect, their company, their position, and themselves. \nIt also includes insights from the customer's Knowledge base. \nYou are a part of an AI SDR product \"Alice\" where people upload all the documents, pdfs, emails, webpages, etc. that they would give to an SDR to train them for creating the most effective outbound email campaigns.\n\n**Reasoning**\n- you must reason with first principles and ground your hypothesis in common sense\n- you MUST keep your tone neutral and grounded, don't make exclamations or assume any sentiments \n- YOU MUST your reasoning to less than 10 words\n- you reasoning MUST be to the point and precise\n\n**Research Plan**\n- Come up with a comprehensive step-by-step research plan starting with the web research on the lead and their company.\n- First do a LinkedIn Search to gather recent professional activity\n- Gather as much information as possible that would help in crafting personalized emails that are likely to catch the lead's attention.\n- Start with Web Research to gather as much information about the lead as possible\n- Leverage the Web Research to understand the unique challenges that the lead's company or division faces and tie them with recent events and signals to get a clear picture of what the sender can help with.\n- Then once you are done with Web Research, move on to Knowledge Base research to learn more about the sender's company and positioning.\n- Leverage Knowledge Base to understand how the sender's company and products can help solve the lead company's problems.\n- If you discover things in the Knowledge Base that require more web research feel free to add it to the Research Plan and revisit prior research.\n- Finally codify your research plan or any changes you make throughout the process using the `codify_research` tool.\n\n**LinkedIn Search**\n- you have 
access to the `linkedin_activity_search` tool that lets you retrieve the lead's linkedin activity which you can use for crafting the broader set of web queries in your research\n- you MUST call this before the web_search_custom_beta to ensure you have some context on the person's linkedin activity.\n\n**Knowledge Base ** \n- knowledge base is the repository of all the SELLER documents that our AI SDR \"Alice\" has access to.\n- knowledge base is used for storing the seller info so use it to discover potential solutions and resources that can help create the perfect research report and position the seller in the best way possible to the buyer.\n- the knowledge base is stored in the form of vector embeddings\n- the documents, pdfs, pages, etc. are chunked before they get upserted into the vector db\n- the chunking strategy involves splitting the text on headers\n- the embedding model used is `llama-text-embed-v2` \n\n**Web Research **\n- you have access to the `web_search_custom_beta` tool that lets you conduct web research for topics related to the lead's company, position, recent events.\n- web research is for looking up the lead, their company , the unique challenges that they're facing, any current events etc.\n- It takes a list of queries to run so make use of it to be as broad or deep as needed.\n- You MUST go as broad and as deep as needed to collect as much information as needed\n- You MUST make your queries detailed with expressive keywords to make the most out of SEO from the web.\n\nResearch Best Practices:\n- Start with broad context, then focus on specific insights\n- Prioritize recent (last 90 days) developments\n- Look for concrete metrics and specific examples\n- Focus on business-critical information\n- Identify unique angles for meaningful engagement\n- Verify information across multiple sources\n- Note any potential timing triggers or urgency drivers\n- Try to gather as much info as possible in as little tool calls as possible we MUST be 
fast\n\n**FINAL RESEARCH REPORT**\n- Come up with a detailed report based on the findings from the full research plan so far.\n- Make sure the research report is highly detailed and cites the relevant web research links and knowledge base resources that you discovered as a part of the research. \n- Make sure the final report covers all the relevant information from the perspective of the seller and buyer\n- Formatting:\n\t1. The Report **must** contain an h1 title\n\t2. All the section headers **must** be h2\n\t3. All the citations **must** be at the end of their sections\n\t4. Your citations MUST be in a SINGLE LINE without any newlines or line breaks e.g. [link1,link2...]\n\t5. Your citations MUST not have any delimiters or quotes\n\t6. You MUST use the lead's name and their company name in the Final Report.\n\t7. You MUST not use terms like lead, seller in the Final Report. Use their names.\n\n**Goal **\n- Your task is to come up with a detailed research report on a particular lead that also contains positioning from the seller company's perspective.\n- This report is to be used in generating personalized outbound messaging for the lead so we want the report to be hyper relevant.\n- Come up with queries and use the web_search_custom_beta tool to do your discovery.\n- Think deeply about the queries you craft because they will determine the quality of the responses.\n- You MUST make your queries detailed with expressive keywords to make the most out of SEO from the web.\n- Once you're finished with the lead discovery you should search the seller company's knowledge base for relevant information that will help craft hyper personalized outbound messaging to the lead later on.\n- Use the knowledge_base_research_executor tool to search the knowledge base\n- Once you are satisfied with the results use the `summarize_research` to summarize the findings and return.\n\n**Tools ** \ncodify_research: Use this tool for codifying the research plan that you come up with for 
doing the deep research. Only call this tool when you've finalized the research plan or have deemed it necessary to make edits to the research flow.\n- Update the research plan as you progress through the steps.\n- Mark the steps as done and mark the current step as pending.\n- Make sure you end the research when all steps get marked done.\n\nlinkedin_activity_search:\n- you have access to the `linkedin_activity_search` tool that lets you retrieve the lead's linkedin activity which you can use for crafting the broader set of web queries in your research\n- you MUST call this before the web_search_custom_beta to ensure you have some context on the person's linkedin activity.\n- you should only include posts from the last 3 months in the research report. Ignore or de-prioritize older posts unless specifically relevant.\n\nknowledge_base_research_executor: a tool that will allow you to search the user's Pinecone database, and will store the results (the search + the results) in your memory\n- the user will provide a lead profile and you have access to the web research results which you use to come up with the queries to pinecone\n- before calling that tool you must come up with multiple queries to send to pinecone which would get the most relevant chunks from the index for a given user profile\n- YOU MUST MAKE AT LEAST 5 CALLS TO THE KNOWLEDGE BASE AND STORE THE RESULTS BEFORE YOU MAKE A JUDGEMENT TO PROCEED OR RETRY\n- once you get the results from pinecone reflect on their relevance re: the lead profile \n- redo the search if you think the results are not relevant (if you use an identical search, the search/result pair in your memory will be replaced)\n\nweb_search_custom_beta: Use this tool for discovery and broad information gathering\n- It takes a list of queries to run so make use of it to be as broad or deep as needed.\n- You should make your queries detailed with expressive keywords to make the most out of SEO from the web.\n- Gather a deeper understanding of 
the company and the person you are researching\n- Find recent news, press releases, reviews, and public information\n- Use the lead's current role, career trajectory, and notable achievements as levers to base your web research on.\n- Use the company overview: mission, products/services, market position to help research additional context about the company\n- Find recent information and developments about the lead's company by including the company domain in the web search tool\n- Broader recent news search for: funding, product launches, expansions, job openings\n\nsummarize_research: A tool for organizing the research results of the prior tool calls to knowledge_base_research_executor and web_search_custom_beta and returning the results and the subsequent analysis.\n- This tool will return the result of your research to the user. \n- Only call this tool once you are finished with your research.\n- MAKE SURE THE RESEARCH IS CITING EVERY SINGLE SOURCE THAT WAS USED FROM THE KNOWLEDGE BASE OR WEB RESEARCH\n\n**FINAL RESEARCH SUMMARY FORMATTING**\n1. The Report **must** contain an h1 title\n2. All the section headers **must** be h2\n3. All the citations **must** be at the end of their sections\n4. Your citations MUST be in a SINGLE LINE without any newlines or line breaks e.g. [link1,link2...]\n5. Your citations MUST not have any delimiters or quotes.\n6. You MUST use the lead's name and their company name in the Final Report.\n7. You MUST not use terms like lead, seller in the Final Report. Use their names.\n\n**SPECIAL INSTRUCTIONS FOR CITATIONS**\n- you MUST end every section with a list of citations FOR THAT SECTION\n- you must delimit the citations using the tag\n- YOU MUST INCLUDE CITATIONS FOR EVERY SECTION\n- for knowledge base resources you MUST include the `knowledge_base_resource.id` for the resource in the citation\n- Your citations MUST be in the form of an array e.g. 
[link1, link2] or [kb_res_1, kb_res_2]\n- Your citations MUST be in a SINGLE LINE without any newlines or line breaks e.g. [link1,link2...]\n- Your citations MUST not have any delimiters or quotes.\n\nThe user will now provide you with the lead information:",
+ "tags": [],
+ "tool_exec_environment_variables": [
+ {
+ "created_at": "2025-08-14T21:31:27.793445+00:00",
+ "description": null,
+ "key": "PINECONE_NAMESPACE",
+ "updated_at": "2025-08-14T21:31:27.793445+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-14T21:31:27.793445+00:00",
+ "description": null,
+ "key": "PINECONE_API_KEY",
+ "updated_at": "2025-08-14T21:31:27.793445+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-14T21:31:27.793445+00:00",
+ "description": null,
+ "key": "FIRECRAWL_API_KEY",
+ "updated_at": "2025-08-14T21:31:27.793445+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-14T21:31:27.793445+00:00",
+ "description": null,
+ "key": "KNOWLEDGE_BASE_RESOURCES",
+ "updated_at": "2025-08-14T21:31:27.793445+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-14T21:31:27.793445+00:00",
+ "description": null,
+ "key": "PINECONE_INDEX_HOST",
+ "updated_at": "2025-08-14T21:31:27.793445+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-14T21:31:27.793445+00:00",
+ "description": null,
+ "key": "PROAPIS_API_KEY",
+ "updated_at": "2025-08-14T21:31:27.793445+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-14T21:31:27.793445+00:00",
+ "description": null,
+ "key": "LETTA_BUILTIN_WEBSEARCH_OPENAI_MODEL_NAME",
+ "updated_at": "2025-08-14T21:31:27.793445+00:00",
+ "value": ""
+ },
+ {
+ "created_at": "2025-08-14T21:31:27.793445+00:00",
+ "description": null,
+ "key": "OPENAI_API_KEY",
+ "updated_at": "2025-08-14T21:31:27.793445+00:00",
+ "value": ""
+ }
+ ],
+ "tool_rules": [
+ {
+ "tool_name": "codify_research",
+ "type": "run_first"
+ },
+ {
+ "tool_name": "codify_research",
+ "type": "constrain_child_tools",
+ "children": [
+ "web_search",
+ "knowledge_base_research_executor",
+ "summarize_research",
+ "linkedin_activity_search"
+ ]
+ },
+ {
+ "tool_name": "codify_research",
+ "type": "continue_loop"
+ },
+ {
+ "tool_name": "knowledge_base_research_executor",
+ "type": "continue_loop"
+ },
+ {
+ "tool_name": "web_search",
+ "type": "continue_loop"
+ },
+ {
+ "tool_name": "send_message",
+ "type": "max_count_per_step",
+ "max_count_limit": 2
+ },
+ {
+ "tool_name": "knowledge_base_research_executor",
+ "type": "max_count_per_step",
+ "max_count_limit": 2
+ },
+ {
+ "tool_name": "summarize_research",
+ "type": "exit_loop"
+ },
+ {
+ "tool_name": "knowledge_base_research_executor",
+ "type": "constrain_child_tools",
+ "children": [
+ "knowledge_base_research_executor",
+ "summarize_research"
+ ]
+ },
+ {
+ "tool_name": "send_message",
+ "type": "continue_loop"
+ },
+ {
+ "tool_name": "summarize_research",
+ "type": "required_before_exit"
+ },
+ {
+ "tool_name": "linkedin_activity_search",
+ "type": "continue_loop"
+ },
+ {
+ "tool_name": "linkedin_activity_search",
+ "type": "required_before_exit"
+ },
+ {
+ "tool_name": "",
+ "type": "max_count_per_step",
+ "max_count_limit": 1
+ }
+ ],
+ "tools": [
+ {
+ "args_json_schema": null,
+ "created_at": "2025-08-13T21:04:36.008421+00:00",
+ "description": "Search the web with a list of query/question pairs and extract passages that answer the corresponding questions.",
+ "json_schema": {
+ "name": "web_search_custom_beta",
+ "description": "Search the web with a list of query/question pairs and extract passages that answer the corresponding questions.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "tasks": {
+ "type": "array",
+ "description": "A list of SearchTask objects, each specifying a search query and a question to answer."
+ }
+ },
+ "required": [
+ "tasks"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "web_search_custom_beta",
+ "return_char_limit": 100000,
+ "source_code": "import json\nimport os\nimport time\nimport concurrent.futures as cf\nfrom collections import defaultdict\nfrom typing import List, Dict, Any\n\nfrom pydantic import BaseModel, Field\n\n\nclass SearchTask(BaseModel):\n query: str = Field(description=\"Search query\")\n question: str = Field(description=\"Question to answer\")\n\ndef web_search_custom_beta(tasks: List[SearchTask]) -> str:\n \"\"\"\n Search the web with a list of query/question pairs and extract passages that answer the corresponding questions.\n\n Args:\n tasks (List[SearchTask]): A list of SearchTask objects, each specifying a search query and a question to answer.\n\n Returns:\n str: A JSON-formatted string containing the results for each query, keyed by the query string.\n \"\"\"\n import time\n time.sleep(60 * 2)\n",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-08-13T23:18:45.566799+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-06-11T21:27:01.599449+00:00",
+ "description": "A tool for codifying the research plan that you come up with for doing the deep research. Only call this tool when you've finalized the research plan or have deemed it necessary to make edits to the research flow.",
+ "json_schema": {
+ "name": "codify_research",
+ "description": "A tool for codifying the research plan that you come up with for doing the deep research. Only call this tool when you've finalized the research plan or have deemed it necessary to make edits to the research flow.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "research_plan": {
+ "type": "string",
+ "description": "A step-by-step comprehensive research plan that includes the flow for deep researching the current prospect. Should include all the intermediate tool calls."
+ }
+ },
+ "required": [
+ "research_plan"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "codify_research",
+ "return_char_limit": 100000,
+ "source_code": "def codify_research(agent_state: \"AgentState\", research_plan: str) -> str:\n \"\"\"\n A tool for codifying the research plan that you come up with for doing the deep research. Only call this tool when you've finalized the research plan or have deemed it necessary to make edits to the research flow.\n Args:\n research_plan (str): A step-by-step comprehensive research plan that includes the flow for deep researching the current prospect. Should include all the intermediate tool calls.\n \"\"\"\n agent_state.memory.update_block_value(label=\"research_plan\", value=research_plan)\n \n return 'Research Updated'\n",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-08-13T22:43:59.002306+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-06-11T21:27:02.845968+00:00",
+ "description": "A tool for organizing the results of the prior tool calls to search_and_store_pinecone_records and returning the results and the subsequent analysis. This tool will return the result of your research to the user. Only call this tool once you are finished with your research.",
+ "json_schema": {
+ "name": "summarize_research",
+ "description": "A tool for organizing the results of the prior tool calls to search_and_store_pinecone_records and returning the results and the subsequent analysis. This tool will return the result of your research to the user. Only call this tool once you are finished with your research.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "web_research_result": {
+ "type": "string",
+ "description": "The web research queries and the results that you found."
+ },
+ "final_research_report": {
+ "type": "string",
+ "description": "Final research report made up of the deep research flow components from both web research and the knowledge base research results that you found. Make sure to use citations from the web links and the knowledge base resources that you had access to. "
+ }
+ },
+ "required": [
+ "web_research_result",
+ "final_research_report"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "summarize_research",
+ "return_char_limit": 1000000,
+ "source_code": "def summarize_research(agent_state: \"AgentState\", web_research_result: str, final_research_report: str) -> str:\n \"\"\"\n A tool for organizing the results of the prior tool calls to search_and_store_pinecone_records and returning the results and the subsequent analysis. This tool will return the result of your research to the user. Only call this tool once you are finished with your research.\n \n Args:\n web_research_result (str): The web research queries and the results that you found.\n final_research_report (str): Final research report made up of the deep research flow components from both web research and the knowledge base research results that you found. Make sure to use citations from the web links and the knowledge base resources that you had access to. \n\n Returns\n str: A formatted JSON strign of queries and the reasoning behind them\n \"\"\"\n import json\n \n knowledge_base_research_str = agent_state.memory.get_block(\"knowledge_base_research\").value\n knowledge_base_research = json.loads(knowledge_base_research_str) if len(knowledge_base_research_str) > 0 else {}\n \n research_plan = agent_state.memory.get_block(\"research_plan\").value\n \n final_return = {\"final_research_report\":final_research_report, \"knowledge_base_research\": knowledge_base_research, \"research_plan\": research_plan, \"web_research_result\": web_research_result}\n \n return json.dumps(final_return, ensure_ascii=False)",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-06-11T21:27:02.845968+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-06-11T21:27:04.286784+00:00",
+ "description": "Search Knowledge Base vector database records with a text query.",
+ "json_schema": {
+ "name": "knowledge_base_research_executor",
+ "description": "Search Knowledge Base vector database records with a text query.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "queries": {
+ "type": "array",
+ "description": "The list of queries to search the database for (vector-based similarity search)."
+ },
+ "top_k": {
+ "type": "integer",
+ "description": "Number of top results to retrieve, defaults to 2 (do not change unless the user requests it)."
+ }
+ },
+ "required": [
+ "queries",
+ "top_k"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "knowledge_base_research_executor",
+ "return_char_limit": 100000,
+ "source_code": "def knowledge_base_research_executor(agent_state: \"AgentState\", queries: List[str], top_k: int):\n \"\"\"\n Search Knowledge Base vector database records with a text query.\n\n Args:\n queries (List[str]): The list of queries to search the database for (vector-based similarity search).\n top_k (int): Number of top results to retrieve, defaults to 2 (do not change unless the user requests it).\n\n Returns:\n dict: The JSON response from the Pinecone API.\n \"\"\"\n import os\n import requests\n import json\n\n # Get environment variables\n namespace = os.getenv(\"PINECONE_NAMESPACE\", None)\n api_key = os.getenv(\"PINECONE_API_KEY\", None)\n index_host = os.getenv(\"PINECONE_INDEX_HOST\", None)\n\n if index_host is None:\n raise ValueError(\n \"Missing PINECONE_HOST env var. Please inform the user that they need to set the tool environment variable in the ADE.\"\n )\n\n if api_key is None:\n raise ValueError(\n \"Missing PINECONE_API_KEY env var. Please inform the user that they need to set the tool environment variable in the ADE.\"\n )\n\n # Set up the URL and headers\n url = f\"{index_host}/records/namespaces/{namespace}/search\"\n headers = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\", \"Api-Key\": api_key, \"X-Pinecone-API-Version\": \"unstable\"}\n coalesced = {}\n \n for i in range(len(queries)):\n query_text = queries[i]\n # Prepare the payload\n payload = {\n \"query\": {\"inputs\": {\"text\": query_text}, \"top_k\": top_k},\n \"fields\": [\"text\", \"url\", \"knowledge_base_document.title\", \"knowledge_base_resource.id\"],\n }\n # Make the request\n #response = requests.post(url, headers=headers, json=payload)\n\n # Validate the JSON response\n #response_json = response.json()\n #print(response_json)\n #if 'result' not in response_json:\n # raise ValueError(\"Did not receive result from Pinecone API\")\n #coalesced[query_text] = response_json['result']\n # 
---------------------------------------------------------------------\n # 5. Merge old and new results – new keys overwrite old when duplicated\n # ---------------------------------------------------------------------\n #current_dict_str = agent_state.memory.get_block(\"knowledge_base_research\").value\n #current_dict = json.loads(current_dict_str) if len(current_dict_str) > 0 else {}\n #merged_results = {**current_dict, **coalesced}\n\n knowledge_base_results = {\n \"results\": [\n {\n \"id\": \"kb_doc_4721\",\n \"title\": \"Memory-Efficient AI Model Deployment for Enterprise Software\",\n \"content_snippet\": \"Modern AI applications face significant memory bottlenecks when deployed at scale. Our framework reduces memory overhead by 40-60% through intelligent model sharding and dynamic memory allocation strategies. Key benefits include reduced infrastructure costs and improved response times for real-time AI applications.\",\n \"relevance_score\": 0.94,\n \"document_type\": \"solution_brief\",\n \"tags\": [\"AI\", \"memory_management\", \"enterprise\", \"deployment\"],\n \"last_updated\": \"2024-12-15\",\n \"author\": \"Technical Solutions Team\"\n },\n {\n \"id\": \"kb_doc_3892\",\n \"title\": \"Software Engineering Best Practices for AI Startups\",\n \"content_snippet\": \"Early-stage AI companies often struggle with technical debt and scalability issues. 
This guide outlines proven methodologies for building maintainable AI systems, including containerization strategies, CI/CD pipelines optimized for ML workflows, and code architecture patterns that support rapid iteration while maintaining production stability.\",\n \"relevance_score\": 0.87,\n \"document_type\": \"best_practices_guide\",\n \"tags\": [\"software_engineering\", \"AI_startups\", \"scalability\", \"technical_debt\"],\n \"last_updated\": \"2025-01-08\",\n \"author\": \"Engineering Excellence Team\"\n },\n {\n \"id\": \"kb_doc_5634\",\n \"title\": \"Vector Database Integration Patterns for AI Applications\",\n \"content_snippet\": \"Implementing vector databases in production AI systems requires careful consideration of indexing strategies, query optimization, and data consistency. Our analysis shows that proper vector database architecture can improve retrieval accuracy by 35% while reducing latency by up to 200ms for similarity searches.\",\n \"relevance_score\": 0.82,\n \"document_type\": \"technical_whitepaper\",\n \"tags\": [\"vector_database\", \"AI_applications\", \"performance\", \"architecture\"],\n \"last_updated\": \"2024-11-22\",\n \"author\": \"Data Architecture Team\"\n },\n {\n \"id\": \"kb_doc_2156\",\n \"title\": \"ROI Analysis: AI Implementation in Software Development Workflows\",\n \"content_snippet\": \"Companies investing in AI-powered development tools see average productivity gains of 25-35% within the first 6 months. Key success factors include proper change management, developer training programs, and integration with existing DevOps pipelines. 
Initial investment typically pays back within 8-12 months.\",\n \"relevance_score\": 0.79,\n \"document_type\": \"case_study\",\n \"tags\": [\"ROI\", \"AI_tools\", \"software_development\", \"productivity\"],\n \"last_updated\": \"2024-10-30\",\n \"author\": \"Business Intelligence Team\"\n },\n {\n \"id\": \"kb_doc_6789\",\n \"title\": \"Common Pitfalls in AI Model Serving Infrastructure\",\n \"content_snippet\": \"Engineering teams frequently encounter issues with model versioning, A/B testing frameworks, and monitoring in production AI systems. Our research identifies the top 5 infrastructure mistakes that lead to system downtime and provides actionable solutions for each challenge.\",\n \"relevance_score\": 0.76,\n \"document_type\": \"troubleshooting_guide\",\n \"tags\": [\"model_serving\", \"infrastructure\", \"production\", \"monitoring\"],\n \"last_updated\": \"2025-01-03\",\n \"author\": \"Platform Engineering Team\"\n }\n ],\n \"search_metadata\": {\n \"query_expansion\": [\"artificial intelligence\", \"machine learning\", \"software architecture\", \"memory optimization\"],\n \"semantic_similarity_threshold\": 0.75,\n \"filters_applied\": [\"document_type:technical\", \"recent:6_months\"],\n \"total_documents_in_kb\": 15847,\n \"search_time_breakdown\": {\n \"query_processing\": \"23ms\",\n \"vector_search\": \"156ms\", \n \"ranking_and_filtering\": \"68ms\"\n }\n },\n \"suggested_follow_up_queries\": [\n \"AI model optimization techniques for startups\",\n \"Cost-effective AI infrastructure for small teams\",\n \"Real-time AI application deployment strategies\"\n ]}\n # ---------------------------------------------------------------------\n # 6. 
Persist the merged dictionary back to memory\n # ---------------------------------------------------------------------\n agent_state.memory.update_block_value(\n label=\"knowledge_base_research\", value=json.dumps(knowledge_base_results)\n )\n\n # ---------------------------------------------------------------------\n # 7. Return a helpful confirmation message\n # ---------------------------------------------------------------------\n return json.dumps(knowledge_base_results)",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-08-13T23:44:50.564688+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2025-08-13T21:04:33.609049+00:00",
+ "description": "Mock function to simulate LinkedIn activities search from a profile URL.\n\nThis function takes 60 seconds to execute and always succeeds.",
+ "json_schema": {
+ "name": "linkedin_activity_search",
+ "description": "Mock function to simulate LinkedIn activities search from a profile URL.\n\nThis function takes 60 seconds to execute and always succeeds.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "linkedin_url": {
+ "type": "string",
+ "description": "LinkedIn profile URL (e.g., https://www.linkedin.com/in/username)"
+ }
+ },
+ "required": [
+ "linkedin_url"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "linkedin_activity_search",
+ "return_char_limit": 6000,
+ "source_code": "import json\nimport time\n\ndef linkedin_activity_search(agent_state: str, linkedin_url: str) -> dict:\n \"\"\"\n Mock function to simulate LinkedIn activities search from a profile URL.\n This function takes 60 seconds to execute and always succeeds.\n\n Args:\n agent_state (str): A string representing the agent state (unused in this mock version)\n linkedin_url (str): LinkedIn profile URL (e.g., https://www.linkedin.com/in/username)\n\n Returns:\n dict: Activities data and success message\n \"\"\"\n # Simulate long processing time\n time.sleep(60)\n\n # Generate mock data\n activities = [\n {\n \"commentary\": \"Interesting post\",\n \"time_elapsed\": \"2 hours ago\",\n \"li_url\": \"https://www.linkedin.com/posts/kian-jones\",\n \"header_text\": \"User shared a post\",\n \"author\": \"Kian Jones\"\n }\n ]\n \n data = {\n \"data\": activities,\n \"paging\": {\"start\": 0, \"count\": 5, \"total\": 5}\n }\n \n # Simulate writing to memory block\n memory_block = json.dumps(data, indent=2)\n \n return {\"success\": True, \"message\": \"Output written to linkedin_activity memory block\"}",
+ "source_type": "python",
+ "tags": [],
+ "tool_type": "custom",
+ "updated_at": "2025-08-14T17:58:22.727159+00:00",
+ "metadata_": {}
+ },
+ {
+ "args_json_schema": null,
+ "created_at": "2024-11-06T23:52:31.024385+00:00",
+ "description": "Sends a message to the human user.",
+ "json_schema": {
+ "name": "send_message",
+ "description": "Sends a message to the human user.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "message": {
+ "type": "string",
+ "description": "Message contents. All unicode (including emojis) are supported."
+ }
+ },
+ "required": [
+ "message"
+ ]
+ },
+ "type": null,
+ "required": []
+ },
+ "name": "send_message",
+ "return_char_limit": 1000000,
+ "source_code": null,
+ "source_type": "python",
+ "tags": [
+ "letta_core"
+ ],
+ "tool_type": "letta_core",
+ "updated_at": "2025-08-14T22:49:01.547029+00:00",
+ "metadata_": {}
+ }
+ ],
+ "updated_at": "2025-08-14T22:49:29.169737+00:00",
+ "version": "0.10.0"
+}
\ No newline at end of file
diff --git a/tests/test_long_running_agents.py b/tests/test_long_running_agents.py
new file mode 100644
index 00000000..04f0b9d6
--- /dev/null
+++ b/tests/test_long_running_agents.py
@@ -0,0 +1,121 @@
+import os
+import threading
+import time
+
+import httpx
+import pytest
+import requests
+from dotenv import load_dotenv
+from letta_client import Letta, MessageCreate, TextContent
+
+from tests.helpers.utils import upload_test_agentfile_from_disk
+
# Per-lead fields fed to the mock "Alice" research agent (mock_alice.af); its
# system prompt ends with "The user will now provide you with the lead
# information:", and this message supplies that information.
RESEARCH_INSTRUCTIONS = "\n Lead Name: Kian Jones\n Lead Title: Software Engineer\n Lead LinkedIn URL: https://www.linkedin.com/in/kian-jones\n Company Name: Letta\n Company Domain: letta.com\n Company Industry: technology/software/ai\n \n**Research Instructions**\n"
# Kick-off prompt for the deep-thought.af agent used in test_deep_research_agent.
DEEP_RESEARCH_INSTRUCTIONS = "Let's get started, we have to research mantis shrimps. I need to know everything there is, or my grandmother will die. Please begin immediately and do a great job, they are scaring me."
+
+
@pytest.fixture(scope="module")
def server_url() -> str:
    """
    Provide the base URL of a running Letta server.

    If LETTA_SERVER_URL is set, that URL is returned as-is (the server is
    assumed to already be live). Otherwise a server is started in a daemon
    thread and this fixture polls /v1/health until it responds, raising
    RuntimeError if it does not come up within the overall deadline.
    """

    def _run_server() -> None:
        # Load .env inside the thread so server configuration (API keys, etc.)
        # is available before the app starts.
        load_dotenv()
        from letta.server.rest_api.app import start_server

        start_server(debug=True)

    url: str = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")

    if not os.getenv("LETTA_SERVER_URL"):
        thread = threading.Thread(target=_run_server, daemon=True)
        thread.start()

        # Poll until the server is up (or the overall deadline passes).
        timeout_seconds = 30
        deadline = time.time() + timeout_seconds
        while time.time() < deadline:
            try:
                # Bound each probe: without a per-request timeout, a hung
                # socket could block far past the 30s deadline.
                resp = requests.get(f"{url}/v1/health", timeout=5)
                if resp.status_code < 500:
                    break
            except requests.exceptions.RequestException:
                # Server not accepting connections yet; retry shortly.
                pass
            time.sleep(0.1)
        else:
            raise RuntimeError(f"Could not reach {url} within {timeout_seconds}s")

    return url
+
+
@pytest.fixture(scope="module")
def client(server_url: str) -> Letta:
    """Return a synchronous Letta REST client pointed at the test server."""
    return Letta(base_url=server_url)
+
+
def test_deep_research_agent(client, server_url, disable_e2b_api_key):
    """
    Import the deep-thought agent file and stream a long-running research
    request, failing the test if the stream hits a read timeout.

    Note: this was previously `async def` with no `await` and no asyncio
    marker, so pytest would collect the coroutine without running it unless
    pytest-asyncio auto mode is enabled; a plain function runs unconditionally.
    """
    imported_af = upload_test_agentfile_from_disk(server_url, "deep-thought.af")

    agent_id = imported_af.agent_ids[0]

    try:
        response = client.agents.messages.create_stream(
            agent_id=agent_id,
            stream_tokens=True,
            messages=[
                MessageCreate(
                    role="user",
                    content=[
                        TextContent(
                            text=DEEP_RESEARCH_INSTRUCTIONS,
                        )
                    ],
                )
            ],
        )
        for chunk in response:
            if chunk.message_type is not None:
                print(chunk)
    except httpx.ReadTimeout as e:
        # Fail with the reason attached instead of a bare `assert False`.
        pytest.fail(f"Timeout on create_stream. Consider enabling pings in create_stream if you have long running agents. {e}")
    finally:
        # Always clean up the imported agent, even on failure.
        client.agents.delete(agent_id=agent_id)
+
+
def test_11x_agent(client, server_url, disable_e2b_api_key):
    """
    Import the mock 11x "Alice" agent file and stream a long-running lead
    research request with keep-alive pings enabled (include_pings=True),
    failing the test if the stream hits a read timeout.

    Note: this was previously `async def` with no `await` and no asyncio
    marker, so pytest would collect the coroutine without running it unless
    pytest-asyncio auto mode is enabled; a plain function runs unconditionally.
    """
    imported_af = upload_test_agentfile_from_disk(server_url, "mock_alice.af")

    agent_id = imported_af.agent_ids[0]

    try:
        response = client.agents.messages.create_stream(
            agent_id=agent_id,
            include_pings=True,
            stream_tokens=True,
            messages=[
                MessageCreate(
                    role="user",
                    content=[
                        TextContent(
                            text=RESEARCH_INSTRUCTIONS,
                        )
                    ],
                )
            ],
        )
        for chunk in response:
            if chunk.message_type is not None:
                print(chunk)
    except httpx.ReadTimeout as e:
        # Fail with the reason attached instead of a bare `assert False`.
        pytest.fail(f"Timeout on create_stream. Consider enabling pings in create_stream if you have long running agents. {e}")
    finally:
        # Always clean up the imported agent, even on failure.
        client.agents.delete(agent_id=agent_id)