fix: use exclude_none instead of per-provider field stripping
The Fireworks workaround manually popped reasoning fields, but Synthetic Direct routes through Fireworks infrastructure and hits the same issue. Passing exclude_none=True to model_dump is the general fix — there is no need to enumerate providers or fields. This also removes the Fireworks special case, since exclude_none covers it.
This commit is contained in:
committed by
Ani - Annie Tunturi
parent
93337ce680
commit
9af8e94fc9
@@ -453,7 +453,7 @@ class OpenAIClient(LLMClientBase):
|
||||
request_obj=data,
|
||||
)
|
||||
|
||||
request_data = data.model_dump(exclude_unset=True)
|
||||
request_data = data.model_dump(exclude_unset=True, exclude_none=True)
|
||||
return request_data
|
||||
|
||||
@trace_method
|
||||
@@ -639,15 +639,8 @@ class OpenAIClient(LLMClientBase):
|
||||
if not supports_structured_output(llm_config):
|
||||
# Provider doesn't support structured output - ensure strict is False
|
||||
tool.function.strict = False
|
||||
request_data = data.model_dump(exclude_unset=True)
|
||||
request_data = data.model_dump(exclude_unset=True, exclude_none=True)
|
||||
|
||||
# Fireworks uses strict validation (additionalProperties: false) and rejects
|
||||
# reasoning fields that are not in their schema.
|
||||
is_fireworks = llm_config.model_endpoint and "fireworks.ai" in llm_config.model_endpoint
|
||||
if is_fireworks and "messages" in request_data:
|
||||
for message in request_data["messages"]:
|
||||
for field in ("reasoning_content_signature", "redacted_reasoning_content", "omitted_reasoning_content"):
|
||||
message.pop(field, None)
|
||||
|
||||
# If Ollama
|
||||
# if llm_config.handle.startswith("ollama/") and llm_config.enable_reasoner:
|
||||
|
||||
Reference in New Issue
Block a user