feat(core): add gpt-5.3-codex model support (#9628)

* feat(core): add gpt-5.3-codex model support

Add OpenAI gpt-5.3-codex model: context window overrides, model pricing
and capabilities, none-reasoning-effort support, and test config.

🐾 Generated with [Letta Code](https://letta.com)

Co-Authored-By: Letta <noreply@letta.com>

* just stage-api && just publish-api

---------

Co-authored-by: Letta <noreply@letta.com>
This commit is contained in:
Kevin Lin
2026-02-24 14:56:56 -08:00
committed by Caren Thomas
parent ddfa922cde
commit 895acb9f4e
5 changed files with 43 additions and 7 deletions

View File

@@ -278,6 +278,8 @@ LLM_MAX_CONTEXT_WINDOW = {
"gpt-5.2-pro": 272000,
"gpt-5.2-pro-2025-12-11": 272000,
"gpt-5.2-codex": 272000,
# gpt-5.3
"gpt-5.3-codex": 272000,
# reasoners
"o1": 200000,
# "o1-pro": 200000, # responses API only

View File

@@ -88,7 +88,7 @@ def supports_none_reasoning_effort(model: str) -> bool:
Currently, GPT-5.1, GPT-5.2, and GPT-5.3 models support the 'none' reasoning effort level.
"""
return model.startswith("gpt-5.1") or model.startswith("gpt-5.2")
return model.startswith("gpt-5.1") or model.startswith("gpt-5.2") or model.startswith("gpt-5.3")
def is_openai_5_model(model: str) -> bool:

View File

@@ -17295,6 +17295,32 @@
"supports_tool_choice": true,
"supports_vision": true
},
"gpt-5.3-codex": {
"cache_read_input_token_cost": 1.75e-7,
"cache_read_input_token_cost_priority": 3.5e-7,
"input_cost_per_token": 1.75e-6,
"input_cost_per_token_priority": 3.5e-6,
"litellm_provider": "openai",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 1.4e-5,
"output_cost_per_token_priority": 2.8e-5,
"supported_endpoints": ["/v1/responses"],
"supported_modalities": ["text", "image"],
"supported_output_modalities": ["text"],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": false,
"supports_tool_choice": true,
"supports_vision": true
},
"gpt-5-mini": {
"cache_read_input_token_cost": 2.5e-8,
"cache_read_input_token_cost_flex": 1.25e-8,