feat: add reasoning settings step to /model (#1007)

This commit is contained in:
Charles Packer
2026-02-17 21:27:13 -08:00
committed by GitHub
parent 9b438bdd91
commit 245390adb0
8 changed files with 667 additions and 75 deletions

View File

@@ -43,6 +43,17 @@ export function getAvailableModelsCacheInfo(): {
};
}
/**
 * Return cached model handles if available.
 * Used by UI components to bootstrap from cache without showing a loading flash.
 */
export function getCachedModelHandles(): Set<string> | null {
  // Copy into a fresh Set so callers cannot mutate the cache's backing list.
  return cache ? new Set(cache.handles) : null;
}
/**
* Provider response from /v1/providers/ endpoint
*/

View File

@@ -5,6 +5,52 @@ import modelsData from "../models.json";
export const models = modelsData;
/** Reasoning-effort tiers that model variants in models.json may declare. */
export type ModelReasoningEffort =
  | "none"
  | "minimal"
  | "low"
  | "medium"
  | "high"
  | "xhigh";

// Canonical ordering from least to most effort; drives the display order of
// reasoning-tier options in the /model selector.
const REASONING_EFFORT_ORDER: ModelReasoningEffort[] = [
  "none",
  "minimal",
  "low",
  "medium",
  "high",
  "xhigh",
];
/** Type guard: true when `value` is one of the known reasoning-effort strings. */
function isModelReasoningEffort(value: unknown): value is ModelReasoningEffort {
  if (typeof value !== "string") {
    return false;
  }
  // Widen the tier list to string[] so no cast of `value` is needed.
  return (REASONING_EFFORT_ORDER as readonly string[]).includes(value);
}
/**
 * Collect the reasoning-tier variants available for a model handle.
 *
 * Scans the static model catalog for entries sharing `modelHandle`, keeps the
 * first model id seen for each valid reasoning effort, and returns the pairs
 * ordered from lowest ("none") to highest ("xhigh") effort. A handle with no
 * tiered variants yields an empty array.
 */
export function getReasoningTierOptionsForHandle(modelHandle: string): Array<{
  effort: ModelReasoningEffort;
  modelId: string;
}> {
  // First catalog entry wins per effort, matching catalog declaration order.
  const firstIdPerEffort = new Map<ModelReasoningEffort, string>();
  for (const model of models) {
    if (model.handle !== modelHandle) {
      continue;
    }
    const rawEffort = (
      model.updateArgs as { reasoning_effort?: unknown } | null
    )?.reasoning_effort;
    if (isModelReasoningEffort(rawEffort) && !firstIdPerEffort.has(rawEffort)) {
      firstIdPerEffort.set(rawEffort, model.id);
    }
  }
  // Emit in canonical effort order, skipping tiers with no catalog entry.
  const options: Array<{ effort: ModelReasoningEffort; modelId: string }> = [];
  for (const effort of REASONING_EFFORT_ORDER) {
    const modelId = firstIdPerEffort.get(effort);
    if (modelId !== undefined) {
      options.push({ effort, modelId });
    }
  }
  return options;
}
/**
* Resolve a model by ID or handle
* @param modelIdentifier - Can be either a model ID (e.g., "opus-4.5") or a full handle (e.g., "anthropic/claude-opus-4-5")

View File

@@ -54,9 +54,16 @@ function buildModelSettings(
| "minimal"
| "low"
| "medium"
| "high",
| "high"
| "xhigh",
};
}
const verbosity = updateArgs?.verbosity;
if (verbosity === "low" || verbosity === "medium" || verbosity === "high") {
// The backend supports verbosity for OpenAI-family providers; the generated
// client type may lag this field, so set it via a narrow record cast.
(openaiSettings as Record<string, unknown>).verbosity = verbosity;
}
settings = openaiSettings;
} else if (isAnthropic) {
const anthropicSettings: AnthropicModelSettings = {

View File

@@ -52,7 +52,11 @@ import {
getMemoryFilesystemRoot,
} from "../agent/memoryFilesystem";
import { sendMessageStream } from "../agent/message";
import { getModelInfo, getModelShortName } from "../agent/model";
import {
getModelInfo,
getModelShortName,
type ModelReasoningEffort,
} from "../agent/model";
import { INTERRUPT_RECOVERY_ALERT } from "../agent/promptAssets";
import { SessionStats } from "../agent/stats";
import {
@@ -130,6 +134,7 @@ import { McpSelector } from "./components/McpSelector";
import { MemfsTreeViewer } from "./components/MemfsTreeViewer";
import { MemoryTabViewer } from "./components/MemoryTabViewer";
import { MessageSearch } from "./components/MessageSearch";
import { ModelReasoningSelector } from "./components/ModelReasoningSelector";
import { ModelSelector } from "./components/ModelSelector";
import { NewAgentDialog } from "./components/NewAgentDialog";
import { PendingApprovalStub } from "./components/PendingApprovalStub";
@@ -1199,6 +1204,11 @@ export default function App({
filterProvider?: string;
forceRefresh?: boolean;
}>({});
const [modelReasoningPrompt, setModelReasoningPrompt] = useState<{
modelLabel: string;
initialModelId: string;
options: Array<{ effort: ModelReasoningEffort; modelId: string }>;
} | null>(null);
const closeOverlay = useCallback(() => {
const pending = pendingOverlayCommandRef.current;
if (pending && pending.overlay === activeOverlay) {
@@ -1209,6 +1219,7 @@ export default function App({
setFeedbackPrefill("");
setSearchQuery("");
setModelSelectorOptions({});
setModelReasoningPrompt(null);
}, [activeOverlay]);
// Queued overlay action - executed after end_turn when user makes a selection
@@ -9567,22 +9578,65 @@ ${SYSTEM_REMINDER_CLOSE}
}, [pendingApprovals, refreshDerived, queueApprovalResults]);
const handleModelSelect = useCallback(
async (modelId: string, commandId?: string | null) => {
const overlayCommand = commandId
async (
modelId: string,
commandId?: string | null,
opts?: { skipReasoningPrompt?: boolean },
) => {
let overlayCommand = commandId
? commandRunner.getHandle(commandId, "/model")
: consumeOverlayCommand("model");
: null;
const resolveOverlayCommand = () => {
if (overlayCommand) {
return overlayCommand;
}
overlayCommand = consumeOverlayCommand("model");
return overlayCommand;
};
let selectedModel: {
id: string;
handle?: string;
label: string;
updateArgs?: { context_window?: number };
updateArgs?: Record<string, unknown>;
} | null = null;
try {
const { models } = await import("../agent/model");
const { getReasoningTierOptionsForHandle, models } = await import(
"../agent/model"
);
const pickPreferredModelForHandle = (handle: string) => {
const candidates = models.filter((m) => m.handle === handle);
return (
candidates.find((m) => m.isDefault) ??
candidates.find((m) => m.isFeatured) ??
candidates.find(
(m) =>
(m.updateArgs as { reasoning_effort?: unknown } | undefined)
?.reasoning_effort === "medium",
) ??
candidates.find(
(m) =>
(m.updateArgs as { reasoning_effort?: unknown } | undefined)
?.reasoning_effort === "high",
) ??
candidates[0] ??
null
);
};
selectedModel = models.find((m) => m.id === modelId) ?? null;
if (!selectedModel && modelId.includes("/")) {
const handleMatch = pickPreferredModelForHandle(modelId);
if (handleMatch) {
selectedModel = {
...handleMatch,
id: modelId,
handle: modelId,
} as unknown as (typeof models)[number];
}
}
if (!selectedModel && modelId.includes("/")) {
const { getModelContextWindow } = await import(
"../agent/available-models"
@@ -9602,17 +9656,60 @@ ${SYSTEM_REMINDER_CLOSE}
if (!selectedModel) {
const output = `Model not found: ${modelId}. Run /model and press R to refresh available models.`;
const cmd = overlayCommand ?? commandRunner.start("/model", output);
const cmd =
resolveOverlayCommand() ?? commandRunner.start("/model", output);
cmd.fail(output);
return;
}
const model = selectedModel;
const modelHandle = model.handle ?? model.id;
const modelUpdateArgs = model.updateArgs as
| { reasoning_effort?: unknown; enable_reasoner?: unknown }
| undefined;
const rawReasoningEffort = modelUpdateArgs?.reasoning_effort;
const reasoningLevel =
typeof rawReasoningEffort === "string"
? rawReasoningEffort === "none"
? "no"
: rawReasoningEffort === "xhigh"
? "max"
: rawReasoningEffort
: modelUpdateArgs?.enable_reasoner === false
? "no"
: null;
const reasoningTierOptions =
getReasoningTierOptionsForHandle(modelHandle);
if (
!opts?.skipReasoningPrompt &&
activeOverlay === "model" &&
reasoningTierOptions.length > 1
) {
const selectedEffort = (
model.updateArgs as { reasoning_effort?: unknown } | undefined
)?.reasoning_effort;
const preferredOption =
(typeof selectedEffort === "string" &&
reasoningTierOptions.find(
(option) => option.effort === selectedEffort,
)) ??
reasoningTierOptions.find((option) => option.effort === "medium") ??
reasoningTierOptions[0];
if (preferredOption) {
setModelReasoningPrompt({
modelLabel: model.label,
initialModelId: preferredOption.modelId,
options: reasoningTierOptions,
});
return;
}
}
if (isAgentBusy()) {
setActiveOverlay(null);
const cmd =
overlayCommand ??
resolveOverlayCommand() ??
commandRunner.start(
"/model",
`Model switch queued will switch after current task completes`,
@@ -9631,7 +9728,7 @@ ${SYSTEM_REMINDER_CLOSE}
await withCommandLock(async () => {
const cmd =
overlayCommand ??
resolveOverlayCommand() ??
commandRunner.start(
"/model",
`Switching model to ${model.label}...`,
@@ -9686,7 +9783,7 @@ ${SYSTEM_REMINDER_CLOSE}
? `Automatically switched toolset to ${toolsetName}. Use /toolset to change back if desired.\nConsider switching to a different system prompt using /system to match.`
: null;
const outputLines = [
`Switched to ${model.label}`,
`Switched to ${model.label}${reasoningLevel ? ` (${reasoningLevel} reasoning)` : ""}`,
...(autoToolsetLine ? [autoToolsetLine] : []),
].join("\n");
@@ -9698,7 +9795,7 @@ ${SYSTEM_REMINDER_CLOSE}
const guidance =
"Run /model and press R to refresh available models. If the model is still unavailable, choose another model or connect a provider with /connect.";
const cmd =
overlayCommand ??
resolveOverlayCommand() ??
commandRunner.start(
"/model",
`Failed to switch model to ${modelLabel}.`,
@@ -9709,6 +9806,7 @@ ${SYSTEM_REMINDER_CLOSE}
}
},
[
activeOverlay,
agentId,
commandRunner,
consumeOverlayCommand,
@@ -11103,24 +11201,38 @@ Plan file path: ${planFilePath}`;
</Box>
{/* Model Selector - conditionally mounted as overlay */}
{activeOverlay === "model" && (
<ModelSelector
currentModelId={currentModelId ?? undefined}
onSelect={handleModelSelect}
onCancel={closeOverlay}
filterProvider={modelSelectorOptions.filterProvider}
forceRefresh={modelSelectorOptions.forceRefresh}
billingTier={billingTier ?? undefined}
isSelfHosted={(() => {
const settings = settingsManager.getSettings();
const baseURL =
process.env.LETTA_BASE_URL ||
settings.env?.LETTA_BASE_URL ||
"https://api.letta.com";
return !baseURL.includes("api.letta.com");
})()}
/>
)}
{activeOverlay === "model" &&
(modelReasoningPrompt ? (
<ModelReasoningSelector
modelLabel={modelReasoningPrompt.modelLabel}
options={modelReasoningPrompt.options}
initialModelId={modelReasoningPrompt.initialModelId}
onSelect={(selectedModelId) => {
setModelReasoningPrompt(null);
void handleModelSelect(selectedModelId, null, {
skipReasoningPrompt: true,
});
}}
onCancel={() => setModelReasoningPrompt(null)}
/>
) : (
<ModelSelector
currentModelId={currentModelId ?? undefined}
onSelect={handleModelSelect}
onCancel={closeOverlay}
filterProvider={modelSelectorOptions.filterProvider}
forceRefresh={modelSelectorOptions.forceRefresh}
billingTier={billingTier ?? undefined}
isSelfHosted={(() => {
const settings = settingsManager.getSettings();
const baseURL =
process.env.LETTA_BASE_URL ||
settings.env?.LETTA_BASE_URL ||
"https://api.letta.com";
return !baseURL.includes("api.letta.com");
})()}
/>
))}
{activeOverlay === "sleeptime" && (
<SleeptimeSelector

View File

@@ -0,0 +1,152 @@
import { Box, useInput } from "ink";
import { useEffect, useMemo, useState } from "react";
import type { ModelReasoningEffort } from "../../agent/model";
import { useTerminalWidth } from "../hooks/useTerminalWidth";
import { colors } from "./colors";
import { Text } from "./Text";
// Glyph repeated to draw the horizontal rule under the "/model" header.
const SOLID_LINE = "─";
// One filled segment of the reasoning-effort meter bar.
const EFFORT_BLOCK = "▌";

/** One selectable tier: an effort level and the model id that encodes it. */
interface ReasoningOption {
  effort: ModelReasoningEffort;
  modelId: string;
}

/** Props for the reasoning-tier picker shown after choosing a model in /model. */
interface ModelReasoningSelectorProps {
  // Display label of the chosen model (e.g. "Sonnet 4.6").
  modelLabel: string;
  // Tier options, expected in ascending effort order.
  options: ReasoningOption[];
  // Model id whose tier should be highlighted initially.
  initialModelId: string;
  // Called with the model id of the confirmed tier.
  onSelect: (modelId: string) => void;
  // Called on Esc / Ctrl-C.
  onCancel: () => void;
}
/**
 * Human-readable label for an effort tier:
 * "none" → "Off", "xhigh" → "Max", everything else title-cased.
 */
function formatEffortLabel(effort: ModelReasoningEffort): string {
  switch (effort) {
    case "none":
      return "Off";
    case "xhigh":
      return "Max";
    default:
      // "minimal" / "low" / "medium" / "high" all just get a capital first letter.
      return effort.charAt(0).toUpperCase() + effort.slice(1);
  }
}
/**
 * Ink overlay that lets the user pick a reasoning-effort tier for the model
 * they just selected in /model. Renders a segmented meter bar plus the tier
 * label; left/right arrows (and Tab) cycle tiers, Enter confirms, Esc cancels.
 */
export function ModelReasoningSelector({
  modelLabel,
  options,
  initialModelId,
  onSelect,
  onCancel,
}: ModelReasoningSelectorProps) {
  const terminalWidth = useTerminalWidth();
  // Horizontal rule spanning the terminal (floor of 10 columns).
  const solidLine = SOLID_LINE.repeat(Math.max(terminalWidth, 10));
  // Index into `options`; lazily initialized to the tier matching
  // initialModelId, falling back to the first option.
  const [selectedIndex, setSelectedIndex] = useState(() => {
    const idx = options.findIndex(
      (option) => option.modelId === initialModelId,
    );
    return idx >= 0 ? idx : 0;
  });
  // Re-sync the highlight if the caller swaps in new options / initial id
  // while the overlay stays mounted.
  useEffect(() => {
    const idx = options.findIndex(
      (option) => option.modelId === initialModelId,
    );
    if (idx >= 0) {
      setSelectedIndex(idx);
    }
  }, [options, initialModelId]);
  const selectedOption = options[selectedIndex] ?? options[0];
  // Tiers that occupy a meter segment; "none" renders as an empty meter.
  const effortOptions = useMemo(
    () => options.filter((option) => option.effort !== "none"),
    [options],
  );
  // Meter always has at least one segment so the empty state still draws.
  const totalBars = Math.max(effortOptions.length, 1);
  // Number of filled segments: 0 for "none", otherwise 1-based position of
  // the selected effort within the non-"none" tiers.
  const selectedBars = useMemo(() => {
    if (!selectedOption) return 0;
    if (selectedOption.effort === "none") return 0;
    const effortIndex = effortOptions.findIndex(
      (option) => option.effort === selectedOption.effort,
    );
    return effortIndex >= 0 ? effortIndex + 1 : 0;
  }, [effortOptions, selectedOption]);
  useInput((input, key) => {
    // With no options only cancellation is possible.
    if (options.length === 0) {
      if (key.escape || (key.ctrl && input === "c")) {
        onCancel();
      }
      return;
    }
    if (key.ctrl && input === "c") {
      onCancel();
      return;
    }
    if (key.escape) {
      onCancel();
      return;
    }
    if (key.return) {
      if (selectedOption) {
        onSelect(selectedOption.modelId);
      }
      return;
    }
    if (key.leftArrow) {
      // Wraps from the first option to the last.
      setSelectedIndex((prev) =>
        prev === 0 ? options.length - 1 : Math.max(0, prev - 1),
      );
      return;
    }
    if (key.rightArrow || key.tab) {
      // Wraps from the last option back to the first.
      setSelectedIndex((prev) => (prev + 1) % options.length);
    }
  });
  // "Medium" is only a display fallback for the impossible empty-options case.
  const effortLabel = selectedOption
    ? formatEffortLabel(selectedOption.effort)
    : "Medium";
  const selectedText =
    selectedBars > 0 ? EFFORT_BLOCK.repeat(selectedBars) : "";
  const remainingBars =
    totalBars > selectedBars
      ? EFFORT_BLOCK.repeat(totalBars - selectedBars)
      : "";
  // NOTE(review): the key hint below reads " /Tab switch" — it looks like
  // arrow glyphs (e.g. "←/→") may have been dropped; confirm the intended text.
  return (
    <Box flexDirection="column">
      <Text dimColor>{"> /model"}</Text>
      <Text dimColor>{solidLine}</Text>
      <Box height={1} />
      <Text bold color={colors.selector.title}>
        Set your model&apos;s reasoning settings
      </Text>
      <Box height={1} />
      <Box paddingLeft={1}>
        <Text>{modelLabel}</Text>
      </Box>
      <Box height={1} />
      <Box paddingLeft={1} flexDirection="row">
        <Text color={colors.selector.itemHighlighted}>{selectedText}</Text>
        <Text dimColor>{remainingBars}</Text>
        <Text> </Text>
        <Text bold>{effortLabel}</Text>
        <Text dimColor> reasoning effort</Text>
      </Box>
      <Box height={1} />
      <Box paddingLeft={1}>
        <Text dimColor>Enter select · /Tab switch · Esc back</Text>
      </Box>
    </Box>
  );
}

View File

@@ -5,6 +5,7 @@ import {
clearAvailableModelsCache,
getAvailableModelHandles,
getAvailableModelsCacheInfo,
getCachedModelHandles,
} from "../../agent/available-models";
import { models } from "../../agent/model";
import { useTerminalWidth } from "../hooks/useTerminalWidth";
@@ -90,17 +91,20 @@ export function ModelSelector({
const [category, setCategory] = useState<ModelCategory>(defaultCategory);
const [selectedIndex, setSelectedIndex] = useState(0);
const cachedHandlesAtMount = useMemo(() => getCachedModelHandles(), []);
// undefined: not loaded yet (show spinner)
// Set<string>: loaded and filtered
// null: error fallback (show all models + warning)
const [availableHandles, setAvailableHandles] = useState<
Set<string> | null | undefined
>(undefined);
const [allApiHandles, setAllApiHandles] = useState<string[]>([]);
const [isLoading, setIsLoading] = useState(true);
>(cachedHandlesAtMount ?? undefined);
const [allApiHandles, setAllApiHandles] = useState<string[]>(
cachedHandlesAtMount ? Array.from(cachedHandlesAtMount) : [],
);
const [isLoading, setIsLoading] = useState(cachedHandlesAtMount === null);
const [error, setError] = useState<string | null>(null);
const [isCached, setIsCached] = useState(false);
const [isCached, setIsCached] = useState(cachedHandlesAtMount !== null);
const [refreshing, setRefreshing] = useState(false);
const [searchQuery, setSearchQuery] = useState("");
@@ -148,9 +152,25 @@ export function ModelSelector({
loadModels.current(forceRefreshOnMount ?? false);
}, [forceRefreshOnMount]);
// Handles from models.json (for filtering "all" category)
const staticModelHandles = useMemo(
() => new Set(typedModels.map((m) => m.handle)),
const pickPreferredStaticModel = useCallback(
(handle: string): UiModel | undefined => {
const staticCandidates = typedModels.filter((m) => m.handle === handle);
return (
staticCandidates.find((m) => m.isDefault) ??
staticCandidates.find((m) => m.isFeatured) ??
staticCandidates.find(
(m) =>
(m.updateArgs as { reasoning_effort?: unknown } | undefined)
?.reasoning_effort === "medium",
) ??
staticCandidates.find(
(m) =>
(m.updateArgs as { reasoning_effort?: unknown } | undefined)
?.reasoning_effort === "high",
) ??
staticCandidates[0]
);
},
[typedModels],
);
@@ -203,15 +223,47 @@ export function ModelSelector({
[],
);
// All other models: API handles not in models.json and not BYOK
const otherModelHandles = useMemo(() => {
const filtered = allApiHandles.filter(
(handle) => !staticModelHandles.has(handle) && !isByokHandle(handle),
);
if (!searchQuery) return filtered;
// Letta API (all): all non-BYOK handles from API, including recommended models.
const allLettaModels = useMemo(() => {
if (availableHandles === undefined) return [];
const modelsForHandles = allApiHandles
.filter((handle) => !isByokHandle(handle))
.map((handle) => {
const staticModel = pickPreferredStaticModel(handle);
if (staticModel) {
return {
...staticModel,
id: handle,
handle,
};
}
return {
id: handle,
handle,
label: handle,
description: "",
} satisfies UiModel;
});
if (!searchQuery) {
return modelsForHandles;
}
const query = searchQuery.toLowerCase();
return filtered.filter((handle) => handle.toLowerCase().includes(query));
}, [allApiHandles, staticModelHandles, searchQuery, isByokHandle]);
return modelsForHandles.filter(
(model) =>
model.label.toLowerCase().includes(query) ||
model.description.toLowerCase().includes(query) ||
model.handle.toLowerCase().includes(query),
);
}, [
availableHandles,
allApiHandles,
isByokHandle,
pickPreferredStaticModel,
searchQuery,
]);
// Provider name mappings for BYOK -> models.json lookup
// Maps BYOK provider prefix to models.json provider prefix
@@ -251,7 +303,7 @@ export function ModelSelector({
const matched: UiModel[] = [];
for (const handle of byokHandles) {
const baseHandle = toBaseHandle(handle);
const staticModel = typedModels.find((m) => m.handle === baseHandle);
const staticModel = pickPreferredStaticModel(baseHandle);
if (staticModel) {
// Use models.json data but with the BYOK handle as the ID
matched.push({
@@ -277,23 +329,17 @@ export function ModelSelector({
}, [
availableHandles,
allApiHandles,
typedModels,
pickPreferredStaticModel,
searchQuery,
isByokHandle,
toBaseHandle,
]);
// BYOK (all): BYOK handles from API that don't have matching models.json entries
// BYOK (all): all BYOK handles from API (including recommended ones)
const byokAllModels = useMemo(() => {
if (availableHandles === undefined) return [];
// Get BYOK handles that don't have a match in models.json (using alias)
const byokHandles = allApiHandles.filter((handle) => {
if (!isByokHandle(handle)) return false;
const baseHandle = toBaseHandle(handle);
// Exclude if there's a matching entry in models.json
return !staticModelHandles.has(baseHandle);
});
const byokHandles = allApiHandles.filter(isByokHandle);
// Apply search filter
let filtered = byokHandles;
@@ -305,14 +351,7 @@ export function ModelSelector({
}
return filtered;
}, [
availableHandles,
allApiHandles,
staticModelHandles,
searchQuery,
isByokHandle,
toBaseHandle,
]);
}, [availableHandles, allApiHandles, searchQuery, isByokHandle]);
// Server-recommended models: models.json entries available on the server (for self-hosted)
// Filter out letta/letta-free legacy model
@@ -374,19 +413,13 @@ export function ModelSelector({
description: "",
}));
}
// For "all" category, convert handles to simple UiModel objects
return otherModelHandles.map((handle) => ({
id: handle,
handle,
label: handle,
description: "",
}));
return allLettaModels;
}, [
category,
supportedModels,
byokModels,
byokAllModels,
otherModelHandles,
allLettaModels,
serverRecommendedModels,
serverAllModels,
]);
@@ -538,7 +571,7 @@ export function ModelSelector({
if (cat === "server-recommended")
return `Recommended [${serverRecommendedModels.length}]`;
if (cat === "server-all") return `All models [${serverAllModels.length}]`;
return `Letta API (all) [${otherModelHandles.length}]`;
return `Letta API (all) [${allLettaModels.length}]`;
};
const getCategoryDescription = (cat: ModelCategory) => {

View File

@@ -3,15 +3,67 @@
"id": "sonnet",
"handle": "anthropic/claude-sonnet-4-6",
"label": "Sonnet 4.6",
"description": "Anthropic's new Sonnet model with adaptive thinking",
"description": "Anthropic's new Sonnet model (high reasoning)",
"isDefault": true,
"isFeatured": true,
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "high",
"enable_reasoner": true
}
},
{
"id": "sonnet-4.6-no-reasoning",
"handle": "anthropic/claude-sonnet-4-6",
"label": "Sonnet 4.6",
"description": "Sonnet 4.6 with no reasoning (faster)",
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "none",
"enable_reasoner": false
}
},
{
"id": "sonnet-4.6-low",
"handle": "anthropic/claude-sonnet-4-6",
"label": "Sonnet 4.6",
"description": "Sonnet 4.6 (low reasoning)",
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "low",
"enable_reasoner": true,
"max_reasoning_tokens": 4000
}
},
{
"id": "sonnet-4.6-medium",
"handle": "anthropic/claude-sonnet-4-6",
"label": "Sonnet 4.6",
"description": "Sonnet 4.6 (med reasoning)",
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "medium",
"enable_reasoner": true,
"max_reasoning_tokens": 12000
}
},
{
"id": "sonnet-4.6-xhigh",
"handle": "anthropic/claude-sonnet-4-6",
"label": "Sonnet 4.6",
"description": "Sonnet 4.6 (max reasoning)",
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "xhigh",
"enable_reasoner": true,
"max_reasoning_tokens": 31999
}
},
{
"id": "sonnet-4.5",
"handle": "anthropic/claude-sonnet-4-5-20250929",
@@ -39,14 +91,66 @@
"id": "opus",
"handle": "anthropic/claude-opus-4-6",
"label": "Opus 4.6",
"description": "Anthropic's best model with adaptive thinking",
"description": "Anthropic's best model (high reasoning)",
"isFeatured": true,
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "high",
"enable_reasoner": true
}
},
{
"id": "opus-4.6-no-reasoning",
"handle": "anthropic/claude-opus-4-6",
"label": "Opus 4.6",
"description": "Opus 4.6 with no reasoning (faster)",
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "none",
"enable_reasoner": false
}
},
{
"id": "opus-4.6-low",
"handle": "anthropic/claude-opus-4-6",
"label": "Opus 4.6",
"description": "Opus 4.6 (low reasoning)",
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "low",
"enable_reasoner": true,
"max_reasoning_tokens": 4000
}
},
{
"id": "opus-4.6-medium",
"handle": "anthropic/claude-opus-4-6",
"label": "Opus 4.6",
"description": "Opus 4.6 (med reasoning)",
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "medium",
"enable_reasoner": true,
"max_reasoning_tokens": 12000
}
},
{
"id": "opus-4.6-xhigh",
"handle": "anthropic/claude-opus-4-6",
"label": "Opus 4.6",
"description": "Opus 4.6 (max reasoning)",
"updateArgs": {
"context_window": 200000,
"max_output_tokens": 128000,
"reasoning_effort": "xhigh",
"enable_reasoner": true,
"max_reasoning_tokens": 31999
}
},
{
"id": "opus-4.5",
"handle": "anthropic/claude-opus-4-5-20251101",
@@ -82,6 +186,30 @@
"max_output_tokens": 64000
}
},
{
"id": "gpt-5.3-codex-plus-pro-none",
"handle": "chatgpt-plus-pro/gpt-5.3-codex",
"label": "GPT-5.3 Codex",
"description": "GPT-5.3 Codex (no reasoning) via ChatGPT Plus/Pro",
"updateArgs": {
"reasoning_effort": "none",
"verbosity": "medium",
"context_window": 272000,
"max_output_tokens": 128000
}
},
{
"id": "gpt-5.3-codex-plus-pro-low",
"handle": "chatgpt-plus-pro/gpt-5.3-codex",
"label": "GPT-5.3 Codex",
"description": "GPT-5.3 Codex (low reasoning) via ChatGPT Plus/Pro",
"updateArgs": {
"reasoning_effort": "low",
"verbosity": "medium",
"context_window": 272000,
"max_output_tokens": 128000
}
},
{
"id": "gpt-5.3-codex-plus-pro-medium",
"handle": "chatgpt-plus-pro/gpt-5.3-codex",
@@ -99,6 +227,7 @@
"handle": "chatgpt-plus-pro/gpt-5.3-codex",
"label": "GPT-5.3 Codex",
"description": "GPT-5.3 Codex (high reasoning) via ChatGPT Plus/Pro",
"isFeatured": true,
"updateArgs": {
"reasoning_effort": "high",
"verbosity": "medium",
@@ -106,6 +235,18 @@
"max_output_tokens": 128000
}
},
{
"id": "gpt-5.3-codex-plus-pro-xhigh",
"handle": "chatgpt-plus-pro/gpt-5.3-codex",
"label": "GPT-5.3 Codex",
"description": "GPT-5.3 Codex (max reasoning) via ChatGPT Plus/Pro",
"updateArgs": {
"reasoning_effort": "xhigh",
"verbosity": "medium",
"context_window": 272000,
"max_output_tokens": 128000
}
},
{
"id": "gpt-5.2-codex-plus-pro-medium",
"handle": "chatgpt-plus-pro/gpt-5.2-codex",

View File

@@ -1,6 +1,9 @@
import { describe, expect, test } from "bun:test";
import { getModelInfoForLlmConfig } from "../agent/model";
import {
getModelInfoForLlmConfig,
getReasoningTierOptionsForHandle,
} from "../agent/model";
describe("getModelInfoForLlmConfig", () => {
test("selects gpt-5.2 tier by reasoning_effort", () => {
@@ -25,3 +28,90 @@ describe("getModelInfoForLlmConfig", () => {
expect(info?.id).toBe("gpt-5.2-none");
});
});
// Verifies that getReasoningTierOptionsForHandle returns tiers in canonical
// effort order and maps each tier to the expected models.json entry id.
describe("getReasoningTierOptionsForHandle", () => {
  test("returns ordered reasoning options for gpt-5.2-codex", () => {
    const options = getReasoningTierOptionsForHandle("openai/gpt-5.2-codex");
    expect(options.map((option) => option.effort)).toEqual([
      "none",
      "low",
      "medium",
      "high",
      "xhigh",
    ]);
    expect(options.map((option) => option.modelId)).toEqual([
      "gpt-5.2-codex-none",
      "gpt-5.2-codex-low",
      "gpt-5.2-codex-medium",
      "gpt-5.2-codex-high",
      "gpt-5.2-codex-xhigh",
    ]);
  });

  // BYOK (ChatGPT Plus/Pro) handles get their own tiered catalog entries.
  test("returns byok reasoning options for chatgpt-plus-pro gpt-5.3-codex", () => {
    const options = getReasoningTierOptionsForHandle(
      "chatgpt-plus-pro/gpt-5.3-codex",
    );
    expect(options.map((option) => option.effort)).toEqual([
      "none",
      "low",
      "medium",
      "high",
      "xhigh",
    ]);
    expect(options.map((option) => option.modelId)).toEqual([
      "gpt-5.3-codex-plus-pro-none",
      "gpt-5.3-codex-plus-pro-low",
      "gpt-5.3-codex-plus-pro-medium",
      "gpt-5.3-codex-plus-pro-high",
      "gpt-5.3-codex-plus-pro-xhigh",
    ]);
  });

  // The "high" tier maps to the default "sonnet" id, not a suffixed variant.
  test("returns reasoning options for anthropic sonnet 4.6", () => {
    const options = getReasoningTierOptionsForHandle(
      "anthropic/claude-sonnet-4-6",
    );
    expect(options.map((option) => option.effort)).toEqual([
      "none",
      "low",
      "medium",
      "high",
      "xhigh",
    ]);
    expect(options.map((option) => option.modelId)).toEqual([
      "sonnet-4.6-no-reasoning",
      "sonnet-4.6-low",
      "sonnet-4.6-medium",
      "sonnet",
      "sonnet-4.6-xhigh",
    ]);
  });

  // Same shape for Opus: "high" maps to the bare "opus" id.
  test("returns reasoning options for anthropic opus 4.6", () => {
    const options = getReasoningTierOptionsForHandle(
      "anthropic/claude-opus-4-6",
    );
    expect(options.map((option) => option.effort)).toEqual([
      "none",
      "low",
      "medium",
      "high",
      "xhigh",
    ]);
    expect(options.map((option) => option.modelId)).toEqual([
      "opus-4.6-no-reasoning",
      "opus-4.6-low",
      "opus-4.6-medium",
      "opus",
      "opus-4.6-xhigh",
    ]);
  });

  // Handles without any reasoning_effort variants yield no options at all.
  test("returns empty options for models without reasoning tiers", () => {
    const options = getReasoningTierOptionsForHandle(
      "anthropic/claude-haiku-4-5-20251001",
    );
    expect(options).toEqual([]);
  });
});