From 4304a2e2ef2a0588f53461589ec3f36100d4e95f Mon Sep 17 00:00:00 2001
From: cthomas
Date: Tue, 30 Sep 2025 12:07:31 -0700
Subject: [PATCH] feat: integrate simple adapter for non-streaming letta v1
 agent (#5017)

---
 letta/adapters/simple_llm_request_adapter.py | 4 ++++
 letta/agents/letta_agent_v3.py               | 3 +--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/letta/adapters/simple_llm_request_adapter.py b/letta/adapters/simple_llm_request_adapter.py
index 1e436527..ca5c70ce 100644
--- a/letta/adapters/simple_llm_request_adapter.py
+++ b/letta/adapters/simple_llm_request_adapter.py
@@ -66,6 +66,10 @@ class SimpleLLMRequestAdapter(LettaLLMRequestAdapter):
         else:
             self.content = None
 
+        if self.reasoning_content and len(self.reasoning_content) > 0:
+            # Temp workaround to consolidate parts to persist reasoning content, this should be integrated better
+            self.content = self.reasoning_content + (self.content or [])
+
         # Extract tool call
         if self.chat_completions_response.choices[0].message.tool_calls:
             self.tool_call = self.chat_completions_response.choices[0].message.tool_calls[0]
diff --git a/letta/agents/letta_agent_v3.py b/letta/agents/letta_agent_v3.py
index 064364ed..215c06e3 100644
--- a/letta/agents/letta_agent_v3.py
+++ b/letta/agents/letta_agent_v3.py
@@ -4,7 +4,6 @@ from typing import AsyncGenerator, Optional
 from opentelemetry.trace import Span
 
 from letta.adapters.letta_llm_adapter import LettaLLMAdapter
-from letta.adapters.letta_llm_request_adapter import LettaLLMRequestAdapter
 from letta.adapters.simple_llm_request_adapter import SimpleLLMRequestAdapter
 from letta.adapters.simple_llm_stream_adapter import SimpleLLMStreamAdapter
 from letta.agents.helpers import (
@@ -89,7 +88,7 @@ class LettaAgentV3(LettaAgentV2):
             messages=in_context_messages + self.response_messages,
             input_messages_to_persist=input_messages_to_persist,
             # TODO need to support non-streaming adapter too
-            llm_adapter=LettaLLMRequestAdapter(llm_client=self.llm_client, llm_config=self.agent_state.llm_config),
+            llm_adapter=SimpleLLMRequestAdapter(llm_client=self.llm_client, llm_config=self.agent_state.llm_config),
             run_id=run_id,
             # use_assistant_message=use_assistant_message,
             include_return_message_types=include_return_message_types,