fix: send Anthropic effort in model_settings and read it for display (#1016)

Co-authored-by: Letta <noreply@letta.com>
This commit is contained in:
Charles Packer
2026-02-18 12:57:52 -08:00
committed by GitHub
parent 4e1827ebc1
commit bdc23932b5
2 changed files with 59 additions and 14 deletions

View File

@@ -70,6 +70,14 @@ function buildModelSettings(
provider_type: "anthropic",
parallel_tool_calls: true,
};
// Map reasoning_effort to Anthropic's effort field (controls token spending via output_config)
const effort = updateArgs?.reasoning_effort;
if (effort === "low" || effort === "medium" || effort === "high") {
anthropicSettings.effort = effort;
} else if (effort === "xhigh") {
// "max" is valid on the backend but the SDK type hasn't caught up yet
(anthropicSettings as Record<string, unknown>).effort = "max";
}
// Build thinking config if either enable_reasoner or max_reasoning_tokens is specified
if (
updateArgs?.enable_reasoner !== undefined ||
@@ -126,6 +134,13 @@ function buildModelSettings(
provider_type: "bedrock",
parallel_tool_calls: true,
};
// Map reasoning_effort to Anthropic's effort field (Bedrock runs Claude models)
const effort = updateArgs?.reasoning_effort;
if (effort === "low" || effort === "medium" || effort === "high") {
bedrockSettings.effort = effort;
} else if (effort === "xhigh") {
bedrockSettings.effort = "max";
}
// Build thinking config if either enable_reasoner or max_reasoning_tokens is specified
if (
updateArgs?.enable_reasoner !== undefined ||

View File

@@ -1295,20 +1295,50 @@ export default function App({
currentModelLabel.split("/").pop())
: null;
const currentModelProvider = llmConfig?.provider_name ?? null;
const llmReasoningEffort = llmConfig?.reasoning_effort;
const llmEnableReasoner = (llmConfig as { enable_reasoner?: boolean | null })
?.enable_reasoner;
const currentReasoningEffort: ModelReasoningEffort | null =
llmReasoningEffort === "none" ||
llmReasoningEffort === "minimal" ||
llmReasoningEffort === "low" ||
llmReasoningEffort === "medium" ||
llmReasoningEffort === "high" ||
llmReasoningEffort === "xhigh"
? llmReasoningEffort
: llmEnableReasoner === false
? "none"
: null;
// Derive reasoning effort from model_settings (preferred over deprecated llm_config)
// Resolution order: (1) provider-specific fields on model_settings, then
// (2) deprecated llm_config fields as a fallback, else null (unknown).
const currentReasoningEffort: ModelReasoningEffort | null = (() => {
const ms = agentState?.model_settings;
// "provider_type" presence is used as the discriminant for the settings union.
if (ms && "provider_type" in ms) {
// OpenAI/OpenRouter: reasoning.reasoning_effort
if (ms.provider_type === "openai" && "reasoning" in ms && ms.reasoning) {
// Cast because the SDK type for `reasoning` may not declare this field;
// the allow-list below keeps only values ModelReasoningEffort accepts.
const re = (ms.reasoning as { reasoning_effort?: string })
.reasoning_effort;
if (
re === "none" ||
re === "minimal" ||
re === "low" ||
re === "medium" ||
re === "high" ||
re === "xhigh"
)
return re;
}
// Anthropic/Bedrock: effort field (maps to output_config.effort in the API)
if (ms.provider_type === "anthropic" || ms.provider_type === "bedrock") {
const effort = (ms as { effort?: string | null }).effort;
if (effort === "low" || effort === "medium" || effort === "high")
return effort;
// Backend stores "max"; the UI effort scale calls the same level "xhigh"
// (inverse of the write-side mapping that sends "xhigh" as "max").
if (effort === "max") return "xhigh";
}
}
// Fallback: deprecated llm_config fields
const re = llmConfig?.reasoning_effort;
if (
re === "none" ||
re === "minimal" ||
re === "low" ||
re === "medium" ||
re === "high" ||
re === "xhigh"
)
return re;
// enable_reasoner === false means reasoning explicitly disabled → "none";
// undefined/null stays null (effort unknown, no UI selection shown).
if (
(llmConfig as { enable_reasoner?: boolean | null })?.enable_reasoner ===
false
)
return "none";
return null;
})();
// Billing tier for conditional UI and error context (fetched once on mount)
const [billingTier, setBillingTier] = useState<string | null>(null);