fix: do not pass temperature to request if model is oai reasoning model (#2189)

Co-authored-by: Charles Packer <packercharles@gmail.com>
Author: Shangyin Tan
Date: 2025-05-24 21:34:18 -07:00
Committed by: GitHub
Parent commit: 098b2efe7b
Commit: 2199d8fdda
3 changed files with 7 additions and 6 deletions

View File

@@ -58,9 +58,10 @@ class OpenAIStreamingInterface:
def get_tool_call_object(self) -> ToolCall:
"""Useful for agent loop"""
function_name = self.last_flushed_function_name if self.last_flushed_function_name else self.function_name_buffer
return ToolCall(
id=self.letta_tool_message_id,
function=FunctionCall(arguments=self.current_function_arguments, name=self.last_flushed_function_name),
function=FunctionCall(arguments=self.current_function_arguments, name=function_name),
)
async def process(self, stream: AsyncStream[ChatCompletionChunk]) -> AsyncGenerator[LettaMessage, None]:

View File

@@ -226,7 +226,7 @@ def build_openai_chat_completions_request(
tool_choice=tool_choice,
user=str(user_id),
max_completion_tokens=llm_config.max_tokens,
temperature=llm_config.temperature if supports_temperature_param(model) else None,
temperature=llm_config.temperature if supports_temperature_param(model) else 1.0,
reasoning_effort=llm_config.reasoning_effort,
)
else:
@@ -237,7 +237,7 @@ def build_openai_chat_completions_request(
function_call=function_call,
user=str(user_id),
max_completion_tokens=llm_config.max_tokens,
temperature=1.0 if llm_config.enable_reasoner else llm_config.temperature,
temperature=llm_config.temperature if supports_temperature_param(model) else 1.0,
reasoning_effort=llm_config.reasoning_effort,
)
# https://platform.openai.com/docs/guides/text-generation/json-mode

View File

@@ -41,7 +41,7 @@ def is_openai_reasoning_model(model: str) -> bool:
"""Utility function to check if the model is a 'reasoner'"""
# NOTE: needs to be updated with new model releases
is_reasoning = model.startswith("o1") or model.startswith("o3")
is_reasoning = model.startswith("o1") or model.startswith("o3") or model.startswith("o4")
return is_reasoning
@@ -187,9 +187,9 @@ class OpenAIClient(LLMClientBase):
tool_choice=tool_choice,
user=str(),
max_completion_tokens=llm_config.max_tokens,
temperature=llm_config.temperature if supports_temperature_param(model) else None,
# NOTE: the reasoners that don't support temperature require 1.0, not None
temperature=llm_config.temperature if supports_temperature_param(model) else 1.0,
)
# always set user id for openai requests
if self.actor:
data.user = self.actor.id