From 6b7c59b0be16ebbd7803447c69ce08d5e23141ad Mon Sep 17 00:00:00 2001 From: cpacker Date: Wed, 18 Feb 2026 19:00:07 -0800 Subject: [PATCH] fix(tui): populate reasoning_effort from updateArgs as fallback when API omits it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Letta API _to_model_settings() for Anthropic was not including effort in the GET response (backend bug), so agentState.model_settings.effort and llm_config.reasoning_effort both came back null after a /model switch. deriveReasoningEffort then had nothing to work with. Client-side fix: after updateAgentLLMConfig returns, merge reasoning_effort from model.updateArgs (/model switch) and desired.effort (Tab cycle flush) into the llmConfig state we set. This populates the fallback path in deriveReasoningEffort reliably regardless of what the API echoes back. The actual root cause is fixed in letta-cloud: _to_model_settings() now includes effort=self.effort for Anthropic models. 👾 Generated with [Letta Code](https://letta.com) Co-Authored-By: Letta --- src/cli/App.tsx | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/cli/App.tsx b/src/cli/App.tsx index cacdf06..a8dd90a 100644 --- a/src/cli/App.tsx +++ b/src/cli/App.tsx @@ -10094,7 +10094,15 @@ ${SYSTEM_REMINDER_CLOSE} modelHandle, model.updateArgs, ); - setLlmConfig(updatedAgent.llm_config); + // The API may not echo reasoning_effort back in llm_config or model_settings.effort, + // so populate it from model.updateArgs as a reliable fallback. + const rawEffort = model.updateArgs?.reasoning_effort; + setLlmConfig({ + ...updatedAgent.llm_config, + ...(typeof rawEffort === "string" + ? 
{ reasoning_effort: rawEffort as ModelReasoningEffort } + : {}), + }); // Refresh agentState so model_settings (canonical reasoning effort source) is current setAgentState((prev) => prev @@ -10777,7 +10785,11 @@ ${SYSTEM_REMINDER_CLOSE} }, ); - setLlmConfig(updatedAgent.llm_config); + // The API may not echo reasoning_effort back; populate from desired.effort. + setLlmConfig({ + ...updatedAgent.llm_config, + reasoning_effort: desired.effort as ModelReasoningEffort, + }); // Refresh agentState so model_settings (canonical reasoning effort source) is current setAgentState((prev) => prev