diff --git a/examples/docs/agent_advanced.py b/examples/docs/agent_advanced.py index 3bce3c1d..4451baf4 100644 --- a/examples/docs/agent_advanced.py +++ b/examples/docs/agent_advanced.py @@ -27,7 +27,7 @@ agent_state = client.agents.create( ), ], # LLM model & endpoint configuration - llm="openai/gpt-4", + model="openai/gpt-4", context_window_limit=8000, # embedding model & endpoint configuration (cannot be changed) embedding="openai/text-embedding-ada-002", diff --git a/examples/docs/agent_basic.py b/examples/docs/agent_basic.py index 90b8ac69..aa2e4204 100644 --- a/examples/docs/agent_basic.py +++ b/examples/docs/agent_basic.py @@ -18,7 +18,7 @@ agent_state = client.agents.create( ), ], # set automatic defaults for LLM/embedding config - llm="openai/gpt-4", + model="openai/gpt-4", embedding="openai/text-embedding-ada-002", ) print(f"Created agent with name {agent_state.name} and unique ID {agent_state.id}") diff --git a/examples/docs/rest_client.py b/examples/docs/rest_client.py index 0e98a3c7..9b099002 100644 --- a/examples/docs/rest_client.py +++ b/examples/docs/rest_client.py @@ -31,7 +31,7 @@ def main(): value="I am a friendly AI", ), ], - llm=llm_configs[0].handle, + model=llm_configs[0].handle, embedding=embedding_configs[0].handle, ) print(f"Created agent: {agent_state.name} with ID {str(agent_state.id)}") diff --git a/examples/docs/tools.py b/examples/docs/tools.py index e1ff8c26..78d9b98c 100644 --- a/examples/docs/tools.py +++ b/examples/docs/tools.py @@ -45,7 +45,7 @@ agent_state = client.agents.create( ), ], # set automatic defaults for LLM/embedding config - llm="openai/gpt-4", + model="openai/gpt-4", embedding="openai/text-embedding-ada-002", # create the agent with an additional tool tool_ids=[tool.id], @@ -88,7 +88,7 @@ agent_state = client.agents.create( value="username: sarah", ), ], - llm="openai/gpt-4", + model="openai/gpt-4", embedding="openai/text-embedding-ada-002", include_base_tools=False, tool_ids=[tool.id, send_message_tool], diff --git 
a/examples/notebooks/Agentic RAG with Letta.ipynb b/examples/notebooks/Agentic RAG with Letta.ipynb index ca28eda1..47df76bd 100644 --- a/examples/notebooks/Agentic RAG with Letta.ipynb +++ b/examples/notebooks/Agentic RAG with Letta.ipynb @@ -118,7 +118,7 @@ " value=\"Name: Sarah\",\n", " ),\n", " ],\n", - " llm=\"openai/gpt-4\",\n", + " model=\"openai/gpt-4\",\n", " embedding=\"openai/text-embedding-ada-002\",\n", ")" ] @@ -305,7 +305,7 @@ " ),\n", " ],\n", " # set automatic defaults for LLM/embedding config\n", - " llm=\"openai/gpt-4\",\n", + " model=\"openai/gpt-4\",\n", " embedding=\"openai/text-embedding-ada-002\",\n", ")\n", "normal_agent.tools" ] @@ -345,7 +345,7 @@ " ),\n", " ],\n", " # set automatic defaults for LLM/embedding config\n", - " llm=\"openai/gpt-4\",\n", + " model=\"openai/gpt-4\",\n", " embedding=\"openai/text-embedding-ada-002\",\n", " tools=['send_message'], \n", " include_base_tools=False\n", @@ -422,7 +422,7 @@ " + \"that you use to lookup information about users' birthdays.\"\n", " ),\n", " ],\n", - " llm=\"openai/gpt-4\",\n", + " model=\"openai/gpt-4\",\n", " embedding=\"openai/text-embedding-ada-002\"\n", ")" ] @@ -852,7 +852,7 @@ " ),\n", " ],\n", " tool_ids=[search_tool.id], \n", - " llm=\"openai/gpt-4\",\n", + " model=\"openai/gpt-4\",\n", ")" ] }, diff --git a/examples/notebooks/Multi-agent recruiting workflow.ipynb b/examples/notebooks/Multi-agent recruiting workflow.ipynb index a9ef6e6f..766badc9 100644 --- a/examples/notebooks/Multi-agent recruiting workflow.ipynb +++ b/examples/notebooks/Multi-agent recruiting workflow.ipynb @@ -182,7 +182,7 @@ " ],\n", " block_ids=[org_block.id],\n", - " tool_ids=[read_resume_tool.id, submit_evaluation_tool.id]\n", - " llm=\"openai/gpt-4\",\n", + " tool_ids=[read_resume_tool.id, submit_evaluation_tool.id],\n", + " model=\"openai/gpt-4\",\n", " embedding=\"openai/text-embedding-ada-002\",\n", ")\n" ] @@ -251,7 +251,7 @@ " ],\n", " block_ids=[org_block.id],\n", - " tool_ids=[email_candidate_tool.id]\n", - " llm=\"openai/gpt-4\",\n", + " tool_ids=[email_candidate_tool.id],\n", + " model=\"openai/gpt-4\",\n", "
embedding=\"openai/text-embedding-ada-002\",\n", ")" ] @@ -623,7 +623,7 @@ " ],\n", " block_ids=[org_block.id],\n", - " tool_ids=[read_resume_tool.id, submit_evaluation_tool.id]\n", - " llm=\"openai/gpt-4\",\n", + " tool_ids=[read_resume_tool.id, submit_evaluation_tool.id],\n", + " model=\"openai/gpt-4\",\n", " embedding=\"openai/text-embedding-ada-002\",\n", ")\n", "\n", @@ -637,7 +637,7 @@ " ],\n", " block_ids=[org_block.id],\n", - " tool_ids=[email_candidate_tool.id]\n", - " llm=\"openai/gpt-4\",\n", + " tool_ids=[email_candidate_tool.id],\n", + " model=\"openai/gpt-4\",\n", " embedding=\"openai/text-embedding-ada-002\",\n", ")" ] @@ -740,7 +740,7 @@ " ],\n", " block_ids=[org_block.id],\n", " tool_ids=[search_candidate_tool.id, consider_candidate_tool.id],\n", - " llm=\"openai/gpt-4\",\n", + " model=\"openai/gpt-4\",\n", " embedding=\"openai/text-embedding-ada-002\"\n", ")\n", " \n" diff --git a/letta/schemas/agent.py b/letta/schemas/agent.py index 17e3ac6f..edcede23 100644 --- a/letta/schemas/agent.py +++ b/letta/schemas/agent.py @@ -123,7 +123,7 @@ class CreateAgent(BaseModel, validate_assignment=True): # ) description: Optional[str] = Field(None, description="The description of the agent.") metadata_: Optional[Dict] = Field(None, description="The metadata of the agent.", alias="metadata_") - llm: Optional[str] = Field( + model: Optional[str] = Field( None, description="The LLM configuration handle used by the agent, specified in the format " "provider/model-name, as an alternative to specifying llm_config.", @@ -166,17 +166,17 @@ class CreateAgent(BaseModel, validate_assignment=True): # return name - @field_validator("llm") + @field_validator("model") @classmethod - def validate_llm(cls, llm: Optional[str]) -> Optional[str]: - if not llm: - return llm + def validate_model(cls, model: Optional[str]) -> Optional[str]: + if not model: + return model - provider_name, model_name = llm.split("/", 1) + provider_name, model_name = model.split("/", 1) if not provider_name or not model_name: raise ValueError("The llm config handle should be in the format provider/model-name") -
return llm + return model @field_validator("embedding") @classmethod @@ -184,8 +184,8 @@ class CreateAgent(BaseModel, validate_assignment=True): # if not embedding: return embedding - provider_name, model_name = embedding.split("/", 1) - if not provider_name or not model_name: + provider_name, embedding_name = embedding.split("/", 1) + if not provider_name or not embedding_name: raise ValueError("The embedding config handle should be in the format provider/model-name") return embedding diff --git a/letta/server/server.py b/letta/server/server.py index 8e01fc31..04841419 100644 --- a/letta/server/server.py +++ b/letta/server/server.py @@ -773,9 +773,9 @@ class SyncServer(Server): interface: Union[AgentInterface, None] = None, ) -> AgentState: if request.llm_config is None: - if request.llm is None: - raise ValueError("Must specify either llm or llm_config in request") - request.llm_config = self.get_llm_config_from_handle(handle=request.llm, context_window_limit=request.context_window_limit) + if request.model is None: + raise ValueError("Must specify either model or llm_config in request") + request.llm_config = self.get_llm_config_from_handle(handle=request.model, context_window_limit=request.context_window_limit) if request.embedding_config is None: if request.embedding is None: diff --git a/tests/test_server.py b/tests/test_server.py index b732e95b..53d973e1 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -330,7 +330,7 @@ def agent_id(server, user_id, base_tools): name="test_agent", tool_ids=[t.id for t in base_tools], memory_blocks=[], - llm="openai/gpt-4", + model="openai/gpt-4", embedding="openai/text-embedding-ada-002", ), actor=actor, @@ -351,7 +351,7 @@ def other_agent_id(server, user_id, base_tools): name="test_agent_other", tool_ids=[t.id for t in base_tools], memory_blocks=[], - llm="openai/gpt-4", + model="openai/gpt-4", embedding="openai/text-embedding-ada-002", ), actor=actor, @@ -550,7 +550,7 @@ def test_delete_agent_same_org(server: 
SyncServer, org_id: str, user: User): request=CreateAgent( name="nonexistent_tools_agent", memory_blocks=[], - llm="openai/gpt-4", + model="openai/gpt-4", embedding="openai/text-embedding-ada-002", ), actor=user, @@ -861,7 +861,7 @@ def test_memory_rebuild_count(server, user, mock_e2b_api_key_none, base_tools, b CreateBlock(label="human", value="The human's name is Bob."), CreateBlock(label="persona", value="My name is Alice."), ], - llm="openai/gpt-4", + model="openai/gpt-4", embedding="openai/text-embedding-ada-002", ), actor=actor, @@ -1048,7 +1048,7 @@ def test_add_remove_tools_update_agent(server: SyncServer, user_id: str, base_to CreateBlock(label="human", value="The human's name is Bob."), CreateBlock(label="persona", value="My name is Alice."), ], - llm="openai/gpt-4", + model="openai/gpt-4", embedding="openai/text-embedding-ada-002", include_base_tools=False, ), @@ -1119,7 +1119,10 @@ def test_messages_with_provider_override(server: SyncServer, user_id: str): ) agent = server.create_agent( request=CreateAgent( - memory_blocks=[], llm="anthropic/claude-3-opus-20240229", context_window_limit=200000, embedding="openai/text-embedding-ada-002" + memory_blocks=[], + model="anthropic/claude-3-opus-20240229", + context_window_limit=200000, + embedding="openai/text-embedding-ada-002", ), actor=actor, )