From 9c8589a687270ff9284ad0a763ef6fe1612cad89 Mon Sep 17 00:00:00 2001
From: jnjpng
Date: Fri, 20 Feb 2026 14:52:48 -0800
Subject: [PATCH] fix: correct ChatGPT OAuth GPT-5 max output token defaults
 (#9592)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

fix: align ChatGPT OAuth GPT-5 max output token defaults

Update ChatGPT OAuth provider defaults so GPT-5 family models report
128k max output tokens based on current OpenAI model docs, avoiding
incorrect 16k values in /v1/models responses.

👾 Generated with [Letta Code](https://letta.com)

Co-authored-by: Letta
---
 letta/schemas/providers/chatgpt_oauth.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/letta/schemas/providers/chatgpt_oauth.py b/letta/schemas/providers/chatgpt_oauth.py
index deb20c7c..43a9b2f4 100644
--- a/letta/schemas/providers/chatgpt_oauth.py
+++ b/letta/schemas/providers/chatgpt_oauth.py
@@ -310,15 +310,21 @@ class ChatGPTOAuthProvider(Provider):
         )
 
     def get_default_max_output_tokens(self, model_name: str) -> int:
-        """Get the default max output tokens for ChatGPT models."""
+        """Get the default max output tokens for ChatGPT models.
+
+        References:
+        - https://developers.openai.com/api/docs/models/gpt-5
+        - https://developers.openai.com/api/docs/models/gpt-5-codex
+        - https://developers.openai.com/api/docs/models/gpt-5.1-codex-max
+        """
+        # GPT-5 family (gpt-5, gpt-5.x, codex variants): 128k max output tokens
+        if "gpt-5" in model_name:
+            return 128000
         # Reasoning models (o-series) have higher limits
         if model_name.startswith("o1") or model_name.startswith("o3") or model_name.startswith("o4"):
             return 100000
-        # GPT-5.x models
-        elif "gpt-5" in model_name:
-            return 16384
         # GPT-4 models
-        elif "gpt-4" in model_name:
+        if "gpt-4" in model_name:
             return 16384
         return 4096
 