fix: put inner thoughts in kwargs for anthropic (#1911)

Author:       cthomas
Date:         2025-04-28 16:37:29 -07:00
Committed by: GitHub
Parent:       ab30329b6d
Commit:       a9cd78cc3b
5 changed files with 11 additions and 2 deletions

View File

@@ -327,6 +327,9 @@ def create(
if not use_tool_naming:
raise NotImplementedError("Only tool calling supported on Anthropic API requests")
if llm_config.enable_reasoner:
llm_config.put_inner_thoughts_in_kwargs = False
# Force tool calling
tool_call = None
if functions is None:

View File

@@ -111,6 +111,9 @@ class LLMConfig(BaseModel):
if is_openai_reasoning_model(model):
values["put_inner_thoughts_in_kwargs"] = False
if values.get("enable_reasoner") and values.get("model_endpoint_type") == "anthropic":
values["put_inner_thoughts_in_kwargs"] = False
return values
@model_validator(mode="after")

View File

@@ -744,7 +744,8 @@ class AnthropicProvider(Provider):
# reliable for tool calling (no chance of a non-tool call step)
# Since tool_choice_type 'any' doesn't work with in-content COT
# NOTE For Haiku, it can be flaky if we don't enable this by default
inner_thoughts_in_kwargs = True if "haiku" in model["id"] else False
# inner_thoughts_in_kwargs = True if "haiku" in model["id"] else False
inner_thoughts_in_kwargs = True # we no longer support thinking tags
configs.append(
LLMConfig(

View File

@@ -1211,6 +1211,8 @@ class SyncServer(Server):
llm_config.max_reasoning_tokens = max_reasoning_tokens
if enable_reasoner is not None:
llm_config.enable_reasoner = enable_reasoner
if enable_reasoner and llm_config.model_endpoint_type == "anthropic":
llm_config.put_inner_thoughts_in_kwargs = False
return llm_config

View File

@@ -4,7 +4,7 @@
"model_endpoint": "https://api.anthropic.com/v1",
"model_wrapper": null,
"context_window": 200000,
"put_inner_thoughts_in_kwargs": false,
"put_inner_thoughts_in_kwargs": true,
"enable_reasoner": true,
"max_reasoning_tokens": 1024
}