fix: use exclude_none instead of per-provider field stripping

The Fireworks workaround manually popped reasoning fields, but
Synthetic Direct routes through Fireworks infra and hit the same
issue. exclude_none=True in model_dump is the general fix — no
need to enumerate providers or fields. Removes the Fireworks
special case since exclude_none covers it.
This commit is contained in:
Ani Tunturi
2026-03-21 12:42:03 -04:00
committed by Ani - Annie Tunturi
parent 93337ce680
commit 9af8e94fc9

View File

@@ -453,7 +453,7 @@ class OpenAIClient(LLMClientBase):
                 request_obj=data,
             )
-        request_data = data.model_dump(exclude_unset=True)
+        request_data = data.model_dump(exclude_unset=True, exclude_none=True)
         return request_data

     @trace_method
@trace_method @trace_method
@@ -639,15 +639,8 @@ class OpenAIClient(LLMClientBase):
                 if not supports_structured_output(llm_config):
                     # Provider doesn't support structured output - ensure strict is False
                     tool.function.strict = False
-        request_data = data.model_dump(exclude_unset=True)
+        request_data = data.model_dump(exclude_unset=True, exclude_none=True)
-        # Fireworks uses strict validation (additionalProperties: false) and rejects
-        # reasoning fields that are not in their schema.
-        is_fireworks = llm_config.model_endpoint and "fireworks.ai" in llm_config.model_endpoint
-        if is_fireworks and "messages" in request_data:
-            for message in request_data["messages"]:
-                for field in ("reasoning_content_signature", "redacted_reasoning_content", "omitted_reasoning_content"):
-                    message.pop(field, None)
         # If Ollama
         # if llm_config.handle.startswith("ollama/") and llm_config.enable_reasoner: