diff --git a/letta/schemas/llm_config.py b/letta/schemas/llm_config.py
index 6b918846..afb000d9 100644
--- a/letta/schemas/llm_config.py
+++ b/letta/schemas/llm_config.py
@@ -187,6 +187,8 @@ class LLMConfig(BaseModel):
     @classmethod
     def apply_reasoning_setting_to_config(cls, config: "LLMConfig", reasoning: bool):
         if reasoning:
+            config.enable_reasoner = True
+
             if (
                 config.model_endpoint_type == "anthropic"
                 and ("claude-opus-4" in config.model or "claude-sonnet-4" in config.model or "claude-3-7-sonnet" in config.model)
@@ -194,20 +196,20 @@ class LLMConfig(BaseModel):
                 config.model_endpoint_type == "google_vertex"
                 and ("gemini-2.5-flash" in config.model or "gemini-2.0-pro" in config.model)
             ):
                 config.put_inner_thoughts_in_kwargs = False
-                config.enable_reasoner = True
                 if config.max_reasoning_tokens == 0:
                     config.max_reasoning_tokens = 1024
             elif config.model_endpoint_type == "openai" and (
                 config.model.startswith("o1") or config.model.startswith("o3") or config.model.startswith("o4")
             ):
-                config.put_inner_thoughts_in_kwargs = True
-                config.enable_reasoner = True
+                config.put_inner_thoughts_in_kwargs = False
                 if config.reasoning_effort is None:
                     config.reasoning_effort = "medium"
             else:
                 config.put_inner_thoughts_in_kwargs = True
-                config.enable_reasoner = False
+
         else:
-            config.put_inner_thoughts_in_kwargs = False
             config.enable_reasoner = False
+            config.put_inner_thoughts_in_kwargs = False
+
+        return config