fix: correct ChatGPT OAuth GPT-5 max output token defaults (#9592)
fix: align ChatGPT OAuth GPT-5 max output token defaults Update ChatGPT OAuth provider defaults so GPT-5 family models report 128k max output tokens based on current OpenAI model docs, avoiding incorrect 16k values in /v1/models responses. 👾 Generated with [Letta Code](https://letta.com) Co-authored-by: Letta <noreply@letta.com>
This commit is contained in:
@@ -310,15 +310,21 @@ class ChatGPTOAuthProvider(Provider):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def get_default_max_output_tokens(self, model_name: str) -> int:
    """Get the default max output tokens for ChatGPT models.

    Args:
        model_name: The model identifier (e.g. "gpt-5-codex", "o3-mini", "gpt-4o").

    Returns:
        The default maximum number of output tokens for the model family.

    References:
    - https://developers.openai.com/api/docs/models/gpt-5
    - https://developers.openai.com/api/docs/models/gpt-5-codex
    - https://developers.openai.com/api/docs/models/gpt-5.1-codex-max
    """
    # GPT-5 family (gpt-5, gpt-5.x, codex variants): 128k max output tokens.
    # Checked first so gpt-5 models never fall through to the 16k GPT-4 default.
    if "gpt-5" in model_name:
        return 128000
    # Reasoning models (o-series) have higher limits.
    # startswith accepts a tuple of prefixes — one call covers o1/o3/o4.
    if model_name.startswith(("o1", "o3", "o4")):
        return 100000
    # GPT-4 models
    if "gpt-4" in model_name:
        return 16384
    # Conservative fallback for anything unrecognized.
    return 4096
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user