fix: align codex plus-pro routing and gpt-5.3-codex defaults (#1009)

This commit is contained in:
Charles Packer
2026-02-17 23:19:28 -08:00
committed by GitHub
parent b505ea6117
commit f74dbd6080
4 changed files with 37 additions and 9 deletions

View File

@@ -193,7 +193,7 @@
"description": "GPT-5.3 Codex (no reasoning) via ChatGPT Plus/Pro",
"updateArgs": {
"reasoning_effort": "none",
"verbosity": "medium",
"verbosity": "low",
"context_window": 272000,
"max_output_tokens": 128000
}
@@ -205,7 +205,7 @@
"description": "GPT-5.3 Codex (low reasoning) via ChatGPT Plus/Pro",
"updateArgs": {
"reasoning_effort": "low",
"verbosity": "medium",
"verbosity": "low",
"context_window": 272000,
"max_output_tokens": 128000
}
@@ -217,7 +217,7 @@
"description": "GPT-5.3 Codex (med reasoning) via ChatGPT Plus/Pro",
"updateArgs": {
"reasoning_effort": "medium",
"verbosity": "medium",
"verbosity": "low",
"context_window": 272000,
"max_output_tokens": 128000
}
@@ -230,7 +230,7 @@
"isFeatured": true,
"updateArgs": {
"reasoning_effort": "high",
"verbosity": "medium",
"verbosity": "low",
"context_window": 272000,
"max_output_tokens": 128000
}
@@ -242,7 +242,7 @@
"description": "GPT-5.3 Codex (max reasoning) via ChatGPT Plus/Pro",
"updateArgs": {
"reasoning_effort": "xhigh",
"verbosity": "medium",
"verbosity": "low",
"context_window": 272000,
"max_output_tokens": 128000
}

View File

@@ -27,7 +27,7 @@ describe("getSubagentModelDisplay", () => {
"lc-anthropic/claude-haiku-4-5-20251001",
);
expect(display).toEqual({
label: "claude-haiku-4-5-20251001",
label: "Haiku 4.5",
isByokProvider: true,
isOpenAICodexProvider: false,
});

View File

@@ -0,0 +1,20 @@
import { describe, expect, test } from "bun:test";
import { isOpenAIModel } from "../../tools/manager";
// Unit tests (bun:test) for isOpenAIModel, covering the routing cases this
// commit touches: plain "openai/" handles, the ChatGPT Plus/Pro Codex
// provider prefix, bare model ids resolved via models.json metadata, and a
// negative case for non-OpenAI (Anthropic) handles.
describe("isOpenAIModel", () => {
// Baseline: an explicit "openai/" handle is recognized.
test("detects openai handles", () => {
expect(isOpenAIModel("openai/gpt-5.2-codex")).toBe(true);
});
// New in this commit: the ChatGPT OAuth Codex provider prefix is also OpenAI.
test("detects chatgpt-plus-pro handles", () => {
expect(isOpenAIModel("chatgpt-plus-pro/gpt-5.3-codex")).toBe(true);
});
// A bare model id (no provider prefix) should resolve through models.json
// metadata to its provider handle — presumably "chatgpt-plus-pro/…";
// NOTE(review): confirm against models.json, which is not visible here.
test("detects chatgpt-plus-pro model ids via models.json metadata", () => {
expect(isOpenAIModel("gpt-5.3-codex-plus-pro-high")).toBe(true);
});
// Negative case: other providers' handles must not be classified as OpenAI.
test("does not detect anthropic handles", () => {
expect(isOpenAIModel("anthropic/claude-sonnet-4-6")).toBe(false);
});
});

View File

@@ -7,6 +7,7 @@ import {
runPostToolUseHooks,
runPreToolUseHooks,
} from "../hooks";
import { OPENAI_CODEX_PROVIDER_NAME } from "../providers/openai-codex-provider";
import { telemetry } from "../telemetry";
import { debugLog } from "../utils/debug";
import { TOOL_DEFINITIONS, type ToolName } from "./toolDefinitions";
@@ -805,10 +806,17 @@ export async function loadTools(modelIdentifier?: string): Promise<void> {
// NOTE(review): this span is a rendered diff with the "+"/"-" markers
// stripped, so each pair of adjacent `return` statements shows the
// pre-change line immediately followed by its post-change replacement;
// only one of each pair exists in the actual file. The change extends
// OpenAI detection to the ChatGPT Plus/Pro Codex provider prefix.
export function isOpenAIModel(modelIdentifier: string): boolean {
// Resolve the identifier through models.json metadata first, so bare
// model ids (e.g. "gpt-5.3-codex-plus-pro-high") map to a provider handle.
const info = getModelInfo(modelIdentifier);
if (info?.handle && typeof info.handle === "string") {
// (pre-change line, superseded by the return below)
return info.handle.startsWith("openai/");
// (post-change line) also accept the ChatGPT OAuth Codex provider prefix.
return (
info.handle.startsWith("openai/") ||
info.handle.startsWith(`${OPENAI_CODEX_PROVIDER_NAME}/`)
);
}
// Fallback: treat raw handle-style identifiers as OpenAI if they start with openai/
// (pre-change line, superseded by the return below)
return modelIdentifier.startsWith("openai/");
// Fallback: treat raw handle-style identifiers as OpenAI for openai/*
// and ChatGPT OAuth Codex provider handles.
return (
modelIdentifier.startsWith("openai/") ||
modelIdentifier.startsWith(`${OPENAI_CODEX_PROVIDER_NAME}/`)
);
}
export function isGeminiModel(modelIdentifier: string): boolean {