fix: /model selection for shared-handle tiers (#859)

This commit is contained in:
paulbettner
2026-02-08 21:27:27 -06:00
committed by GitHub
parent c397a463cd
commit 7efa6f60b5
3 changed files with 108 additions and 5 deletions

View File

@@ -78,6 +78,55 @@ export function getModelInfo(modelIdentifier: string) {
return null;
}
/**
 * Resolve model info from a handle plus the agent's llm_config.
 *
 * Several model "tiers" (e.g. gpt-5.2-none/low/medium/high) share a single
 * handle and are distinguished only by their updateArgs (reasoning_effort,
 * enable_reasoner). When a session is resumed, `/model` should highlight the
 * tier whose updateArgs match the live agent configuration, so a plain
 * handle lookup is not enough.
 *
 * @param modelHandle - Model ID or full handle (e.g. "openai/gpt-5.2").
 * @param llmConfig - Reasoning-related fields from the agent's llm_config.
 * @returns The best-matching model entry, or null when nothing matches.
 */
export function getModelInfoForLlmConfig(
  modelHandle: string,
  llmConfig?: {
    reasoning_effort?: string | null;
    enable_reasoner?: boolean | null;
  } | null,
) {
  // Plain ID/handle resolution; used as the preferred fallback below.
  const resolved = getModelInfo(modelHandle);

  // Every tier that shares this handle is a candidate.
  const tiers = models.filter((candidate) => candidate.handle === modelHandle);
  if (tiers.length === 0) {
    return resolved;
  }

  // updateArgs is loosely typed; read individual keys without assuming shape.
  const readArg = (tier: (typeof tiers)[number], key: string): unknown =>
    (tier.updateArgs as Record<string, unknown> | undefined)?.[key];

  // Reasoning effort is the primary discriminator between tiers.
  const effort = llmConfig?.reasoning_effort ?? null;
  if (effort) {
    const byEffort = tiers.find(
      (tier) => readArg(tier, "reasoning_effort") === effort,
    );
    if (byEffort) {
      return byEffort;
    }
  }

  // Anthropic-style toggle (best-effort; llm_config may not always include it).
  if (llmConfig?.enable_reasoner === false) {
    const reasonerOff = tiers.find(
      (tier) => readArg(tier, "enable_reasoner") === false,
    );
    if (reasonerOff) {
      return reasonerOff;
    }
  }

  // Fall back to whatever models.json considers the default for this handle.
  return resolved ?? tiers[0] ?? null;
}
/**
* Get updateArgs for a model by ID or handle
* @param modelIdentifier - Can be either a model ID (e.g., "opus-4.5") or a full handle (e.g., "anthropic/claude-opus-4-5")

View File

@@ -2412,7 +2412,14 @@ export default function App({
agent.llm_config.model_endpoint_type && agent.llm_config.model
? `${agent.llm_config.model_endpoint_type}/${agent.llm_config.model}`
: agent.llm_config.model;
const modelInfo = getModelInfo(agentModelHandle || "");
const { getModelInfoForLlmConfig } = await import("../agent/model");
const modelInfo = getModelInfoForLlmConfig(
agentModelHandle || "",
agent.llm_config as unknown as {
reasoning_effort?: string | null;
enable_reasoner?: boolean | null;
},
);
if (modelInfo) {
setCurrentModelId(modelInfo.id);
} else {
@@ -3303,28 +3310,53 @@ export default function App({
const client = await getClient();
const agent = await client.agents.retrieve(agentIdRef.current);
// Check if the model has changed by comparing llm_config
// Keep model UI in sync with the agent configuration.
// Note: many tiers share the same handle (e.g. gpt-5.2-none/high), so we
// must also treat reasoning settings as model-affecting.
const currentModel = llmConfigRef.current?.model;
const currentEndpoint = llmConfigRef.current?.model_endpoint_type;
const currentEffort = llmConfigRef.current?.reasoning_effort;
const currentEnableReasoner = (
llmConfigRef.current as unknown as {
enable_reasoner?: boolean | null;
}
)?.enable_reasoner;
const agentModel = agent.llm_config.model;
const agentEndpoint = agent.llm_config.model_endpoint_type;
const agentEffort = agent.llm_config.reasoning_effort;
const agentEnableReasoner = (
agent.llm_config as unknown as {
enable_reasoner?: boolean | null;
}
)?.enable_reasoner;
if (
currentModel !== agentModel ||
currentEndpoint !== agentEndpoint
currentEndpoint !== agentEndpoint ||
currentEffort !== agentEffort ||
currentEnableReasoner !== agentEnableReasoner
) {
// Model has changed - update local state
setLlmConfig(agent.llm_config);
// Derive model ID from llm_config for ModelSelector
// Try to find matching model by handle in models.json
const { getModelInfo } = await import("../agent/model");
const { getModelInfoForLlmConfig } = await import(
"../agent/model"
);
const agentModelHandle =
agent.llm_config.model_endpoint_type && agent.llm_config.model
? `${agent.llm_config.model_endpoint_type}/${agent.llm_config.model}`
: agent.llm_config.model;
const modelInfo = getModelInfo(agentModelHandle || "");
const modelInfo = getModelInfoForLlmConfig(
agentModelHandle || "",
agent.llm_config as unknown as {
reasoning_effort?: string | null;
enable_reasoner?: boolean | null;
},
);
if (modelInfo) {
setCurrentModelId(modelInfo.id);
} else {

View File

@@ -0,0 +1,22 @@
import { describe, expect, test } from "bun:test";
import { getModelInfoForLlmConfig } from "../agent/model";

describe("getModelInfoForLlmConfig", () => {
  test("selects gpt-5.2 tier by reasoning_effort", () => {
    const sharedHandle = "openai/gpt-5.2";
    // Each reasoning_effort value should resolve to its dedicated tier ID.
    const expectations = [
      { effort: "high", id: "gpt-5.2-high" },
      { effort: "none", id: "gpt-5.2-none" },
    ] as const;
    for (const { effort, id } of expectations) {
      const tier = getModelInfoForLlmConfig(sharedHandle, {
        reasoning_effort: effort,
      });
      expect(tier?.id).toBe(id);
    }
  });

  test("falls back to first handle match when effort missing", () => {
    // models.json order currently lists gpt-5.2-none first.
    const fallback = getModelInfoForLlmConfig("openai/gpt-5.2", null);
    expect(fallback?.id).toBe("gpt-5.2-none");
  });
});