diff --git a/letta/llm_api/openai_client.py b/letta/llm_api/openai_client.py
index e615315b..0f36c77e 100644
--- a/letta/llm_api/openai_client.py
+++ b/letta/llm_api/openai_client.py
@@ -453,7 +453,7 @@ class OpenAIClient(LLMClientBase):
             request_obj=data,
         )

-        request_data = data.model_dump(exclude_unset=True)
+        request_data = data.model_dump(exclude_unset=True, exclude_none=True)
         return request_data

     @trace_method
@@ -639,15 +639,8 @@ class OpenAIClient(LLMClientBase):
            if not supports_structured_output(llm_config):
                # Provider doesn't support structured output - ensure strict is False
                tool.function.strict = False
-        request_data = data.model_dump(exclude_unset=True)
+        request_data = data.model_dump(exclude_unset=True, exclude_none=True)

-        # Fireworks uses strict validation (additionalProperties: false) and rejects
-        # reasoning fields that are not in their schema.
-        is_fireworks = llm_config.model_endpoint and "fireworks.ai" in llm_config.model_endpoint
-        if is_fireworks and "messages" in request_data:
-            for message in request_data["messages"]:
-                for field in ("reasoning_content_signature", "redacted_reasoning_content", "omitted_reasoning_content"):
-                    message.pop(field, None)

         # If Ollama
         # if llm_config.handle.startswith("ollama/") and llm_config.enable_reasoner: