From bdc23932b5d8ed3666052a1ebf0320287456ef00 Mon Sep 17 00:00:00 2001 From: Charles Packer Date: Wed, 18 Feb 2026 12:57:52 -0800 Subject: [PATCH] fix: send Anthropic effort in model_settings and read it for display (#1016) Co-authored-by: Letta --- src/agent/modify.ts | 15 ++++++++++++ src/cli/App.tsx | 58 ++++++++++++++++++++++++++++++++++----------- 2 files changed, 59 insertions(+), 14 deletions(-) diff --git a/src/agent/modify.ts b/src/agent/modify.ts index 64eb986..c941af7 100644 --- a/src/agent/modify.ts +++ b/src/agent/modify.ts @@ -70,6 +70,14 @@ function buildModelSettings( provider_type: "anthropic", parallel_tool_calls: true, }; + // Map reasoning_effort to Anthropic's effort field (controls token spending via output_config) + const effort = updateArgs?.reasoning_effort; + if (effort === "low" || effort === "medium" || effort === "high") { + anthropicSettings.effort = effort; + } else if (effort === "xhigh") { + // "max" is valid on the backend but the SDK type hasn't caught up yet + (anthropicSettings as Record<string, unknown>).effort = "max"; + } // Build thinking config if either enable_reasoner or max_reasoning_tokens is specified if ( updateArgs?.enable_reasoner !== undefined || @@ -126,6 +134,13 @@ function buildModelSettings( provider_type: "bedrock", parallel_tool_calls: true, }; + // Map reasoning_effort to Anthropic's effort field (Bedrock runs Claude models) + const effort = updateArgs?.reasoning_effort; + if (effort === "low" || effort === "medium" || effort === "high") { + bedrockSettings.effort = effort; + } else if (effort === "xhigh") { + bedrockSettings.effort = "max"; + } // Build thinking config if either enable_reasoner or max_reasoning_tokens is specified if ( updateArgs?.enable_reasoner !== undefined || diff --git a/src/cli/App.tsx b/src/cli/App.tsx index c38a297..0ac0e78 100644 --- a/src/cli/App.tsx +++ b/src/cli/App.tsx @@ -1295,20 +1295,50 @@ export default function App({ currentModelLabel.split("/").pop()) : null; const currentModelProvider = 
llmConfig?.provider_name ?? null; - const llmReasoningEffort = llmConfig?.reasoning_effort; - const llmEnableReasoner = (llmConfig as { enable_reasoner?: boolean | null }) - ?.enable_reasoner; - const currentReasoningEffort: ModelReasoningEffort | null = - llmReasoningEffort === "none" || - llmReasoningEffort === "minimal" || - llmReasoningEffort === "low" || - llmReasoningEffort === "medium" || - llmReasoningEffort === "high" || - llmReasoningEffort === "xhigh" - ? llmReasoningEffort - : llmEnableReasoner === false - ? "none" - : null; + // Derive reasoning effort from model_settings (preferred over deprecated llm_config) + const currentReasoningEffort: ModelReasoningEffort | null = (() => { + const ms = agentState?.model_settings; + if (ms && "provider_type" in ms) { + // OpenAI/OpenRouter: reasoning.reasoning_effort + if (ms.provider_type === "openai" && "reasoning" in ms && ms.reasoning) { + const re = (ms.reasoning as { reasoning_effort?: string }) + .reasoning_effort; + if ( + re === "none" || + re === "minimal" || + re === "low" || + re === "medium" || + re === "high" || + re === "xhigh" + ) + return re; + } + // Anthropic/Bedrock: effort field (maps to output_config.effort in the API) + if (ms.provider_type === "anthropic" || ms.provider_type === "bedrock") { + const effort = (ms as { effort?: string | null }).effort; + if (effort === "low" || effort === "medium" || effort === "high") + return effort; + if (effort === "max") return "xhigh"; + } + } + // Fallback: deprecated llm_config fields + const re = llmConfig?.reasoning_effort; + if ( + re === "none" || + re === "minimal" || + re === "low" || + re === "medium" || + re === "high" || + re === "xhigh" + ) + return re; + if ( + (llmConfig as { enable_reasoner?: boolean | null })?.enable_reasoner === + false + ) + return "none"; + return null; + })(); // Billing tier for conditional UI and error context (fetched once on mount) const [billingTier, setBillingTier] = useState(null);