fix: surface GPT-5.3 Codex for ChatGPT OAuth providers (#9379)
This commit is contained in:
committed by
Caren Thomas
parent
526da4c49b
commit
b0e16ae50f
@@ -36,6 +36,8 @@ TOKEN_REFRESH_BUFFER_SECONDS = 300
|
||||
# Model list based on opencode-openai-codex-auth plugin presets
|
||||
# Reasoning effort levels are configured via llm_config.reasoning_effort
|
||||
CHATGPT_MODELS = [
|
||||
# GPT-5.3 codex
|
||||
{"name": "gpt-5.3-codex", "context_window": 272000},
|
||||
# GPT-5.2 models (supports none/low/medium/high/xhigh reasoning)
|
||||
{"name": "gpt-5.2", "context_window": 272000},
|
||||
{"name": "gpt-5.2-codex", "context_window": 272000},
|
||||
|
||||
@@ -1333,8 +1333,31 @@ class SyncServer(object):
|
||||
# Get typed provider to access schema defaults (e.g., base_url)
|
||||
typed_provider = provider.cast_to_subtype()
|
||||
|
||||
# Sync models if not synced yet
|
||||
if provider.last_synced is None:
|
||||
provider_llm_models = None
|
||||
should_sync_models = provider.last_synced is None
|
||||
|
||||
# ChatGPT OAuth uses a hardcoded model list. If that list changes,
|
||||
# backfill already-synced providers that are missing new handles.
|
||||
if (
|
||||
provider.provider_type == ProviderType.chatgpt_oauth
|
||||
and not should_sync_models
|
||||
):
|
||||
expected_models = await typed_provider.list_llm_models_async()
|
||||
expected_handles = {model.handle for model in expected_models}
|
||||
provider_llm_models = await self.provider_manager.list_models_async(
|
||||
actor=actor,
|
||||
model_type="llm",
|
||||
provider_id=provider.id,
|
||||
enabled=True,
|
||||
)
|
||||
existing_handles = {
|
||||
model.handle for model in provider_llm_models
|
||||
}
|
||||
should_sync_models = not expected_handles.issubset(
|
||||
existing_handles
|
||||
)
|
||||
|
||||
if should_sync_models:
|
||||
models = await typed_provider.list_llm_models_async()
|
||||
embedding_models = await typed_provider.list_embedding_models_async()
|
||||
await self.provider_manager.sync_provider_models_async(
|
||||
@@ -1346,12 +1369,13 @@ class SyncServer(object):
|
||||
await self.provider_manager.update_provider_last_synced_async(provider.id, actor=actor)
|
||||
|
||||
# Read from database
|
||||
provider_llm_models = await self.provider_manager.list_models_async(
|
||||
actor=actor,
|
||||
model_type="llm",
|
||||
provider_id=provider.id,
|
||||
enabled=True,
|
||||
)
|
||||
if provider_llm_models is None:
|
||||
provider_llm_models = await self.provider_manager.list_models_async(
|
||||
actor=actor,
|
||||
model_type="llm",
|
||||
provider_id=provider.id,
|
||||
enabled=True,
|
||||
)
|
||||
for model in provider_llm_models:
|
||||
llm_config = LLMConfig(
|
||||
model=model.name,
|
||||
|
||||
@@ -19,10 +19,16 @@ from letta.schemas.providers import (
|
||||
VLLMProvider,
|
||||
ZAIProvider,
|
||||
)
|
||||
from letta.schemas.providers.chatgpt_oauth import CHATGPT_MODELS
|
||||
from letta.schemas.secret import Secret
|
||||
from letta.settings import model_settings
|
||||
|
||||
|
||||
def test_chatgpt_oauth_model_allowlist_includes_gpt_5_3_codex():
    """The hardcoded ChatGPT OAuth model allowlist must expose gpt-5.3-codex."""
    # CHATGPT_MODELS is a list of {"name": ..., "context_window": ...} dicts;
    # collect every advertised model name and check the new handle is present.
    names = [entry["name"] for entry in CHATGPT_MODELS]
    assert "gpt-5.3-codex" in names
|
||||
|
||||
|
||||
def test_openai():
|
||||
provider = OpenAIProvider(
|
||||
name="openai",
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Tests for provider initialization via ProviderManager.sync_base_providers and provider model persistence."""
|
||||
|
||||
import json
|
||||
import uuid
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
@@ -2660,6 +2661,70 @@ async def test_byok_provider_last_synced_skips_sync_when_set(default_user, provi
|
||||
assert f"test-byok-cached-{test_id}/gpt-4o" in byok_handles
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_chatgpt_oauth_byok_resyncs_when_allowlist_expands(
    default_user, provider_manager
):
    """ChatGPT OAuth providers should backfill newly added hardcoded models.

    Scenario: a BYOK ChatGPT OAuth provider was synced before gpt-5.3-codex
    existed in the hardcoded allowlist. Listing models through the server
    should detect the missing handle and re-sync, so the new model appears.
    """
    test_id = generate_test_id()
    provider_name = f"test-chatgpt-oauth-{test_id}"

    # Far-future expiry so the stored OAuth token never needs refreshing.
    credentials = {
        "access_token": "test-access-token",
        "refresh_token": "test-refresh-token",
        "account_id": "test-account-id",
        "expires_at": 4_102_444_800,  # year 2100 (seconds)
    }
    oauth_credentials = json.dumps(credentials)

    byok_provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=provider_name,
            provider_type=ProviderType.chatgpt_oauth,
            api_key=oauth_credentials,
        ),
        actor=default_user,
        is_byok=True,
    )

    # Simulate a stale provider model cache that predates gpt-5.3-codex.
    stale_model = LLMConfig(
        model="gpt-5.2-codex",
        model_endpoint_type="chatgpt_oauth",
        model_endpoint="https://chatgpt.com/backend-api/codex/responses",
        context_window=272000,
        handle=f"{provider_name}/gpt-5.2-codex",
        provider_name=provider_name,
        provider_category=ProviderCategory.byok,
    )
    await provider_manager.sync_provider_models_async(
        provider=byok_provider,
        llm_models=[stale_model],
        embedding_models=[],
        organization_id=default_user.organization_id,
    )
    # Mark the provider as already synced so only the allowlist-expansion
    # backfill path (not the first-sync path) can add the new model.
    await provider_manager.update_provider_last_synced_async(
        byok_provider.id, actor=default_user
    )

    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    server._enabled_providers = []

    listed = await server.list_llm_models_async(
        actor=default_user,
        provider_category=[ProviderCategory.byok],
        provider_name=provider_name,
    )

    handles = {cfg.handle for cfg in listed}
    assert f"{provider_name}/gpt-5.3-codex" in handles
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_base_provider_updates_last_synced_on_sync(default_user, provider_manager):
|
||||
"""Test that base provider sync updates the last_synced timestamp."""
|
||||
|
||||
Reference in New Issue
Block a user