fix: improve model/reasoning display in footer and slash preview (#1008)

This commit is contained in:
Charles Packer
2026-02-17 22:23:47 -08:00
committed by GitHub
parent 245390adb0
commit b505ea6117
5 changed files with 103 additions and 4 deletions

View File

@@ -192,6 +192,22 @@ export function getModelUpdateArgs(
* @returns The model entry if found, null otherwise * @returns The model entry if found, null otherwise
*/ */
function findModelByHandle(handle: string): (typeof models)[number] | null { function findModelByHandle(handle: string): (typeof models)[number] | null {
const pickPreferred = (candidates: (typeof models)[number][]) =>
candidates.find((m) => m.isDefault) ??
candidates.find((m) => m.isFeatured) ??
candidates.find(
(m) =>
(m.updateArgs as { reasoning_effort?: unknown } | undefined)
?.reasoning_effort === "medium",
) ??
candidates.find(
(m) =>
(m.updateArgs as { reasoning_effort?: unknown } | undefined)
?.reasoning_effort === "high",
) ??
candidates[0] ??
null;
// Try exact match first // Try exact match first
const exactMatch = models.find((m) => m.handle === handle); const exactMatch = models.find((m) => m.handle === handle);
if (exactMatch) return exactMatch; if (exactMatch) return exactMatch;
@@ -204,7 +220,7 @@ function findModelByHandle(handle: string): (typeof models)[number] | null {
const modelPortion = rest.join("/"); const modelPortion = rest.join("/");
// Find models with the same provider where the model portion is contained // Find models with the same provider where the model portion is contained
// in the models.json handle (handles vendor prefixes and version suffixes) // in the models.json handle (handles vendor prefixes and version suffixes)
const partialMatch = models.find((m) => { const providerMatches = models.filter((m) => {
if (!m.handle.startsWith(`${provider}/`)) return false; if (!m.handle.startsWith(`${provider}/`)) return false;
const mModelPortion = m.handle.slice(provider.length + 1); const mModelPortion = m.handle.slice(provider.length + 1);
// Check if either contains the other (handles both directions) // Check if either contains the other (handles both directions)
@@ -213,7 +229,17 @@ function findModelByHandle(handle: string): (typeof models)[number] | null {
modelPortion.includes(mModelPortion) modelPortion.includes(mModelPortion)
); );
}); });
if (partialMatch) return partialMatch; const providerMatch = pickPreferred(providerMatches);
if (providerMatch) return providerMatch;
// Cross-provider fallback by model suffix. This helps when llm_config reports
// provider_type=openai for BYOK models that are represented in models.json
// under a different provider prefix (e.g. chatgpt-plus-pro/*).
const suffixMatches = models.filter((m) =>
m.handle.endsWith(`/${modelPortion}`),
);
const suffixMatch = pickPreferred(suffixMatches);
if (suffixMatch) return suffixMatch;
} }
return null; return null;

View File

@@ -1295,6 +1295,20 @@ export default function App({
currentModelLabel.split("/").pop()) currentModelLabel.split("/").pop())
: null; : null;
const currentModelProvider = llmConfig?.provider_name ?? null; const currentModelProvider = llmConfig?.provider_name ?? null;
const llmReasoningEffort = llmConfig?.reasoning_effort;
const llmEnableReasoner = (llmConfig as { enable_reasoner?: boolean | null })
?.enable_reasoner;
const currentReasoningEffort: ModelReasoningEffort | null =
llmReasoningEffort === "none" ||
llmReasoningEffort === "minimal" ||
llmReasoningEffort === "low" ||
llmReasoningEffort === "medium" ||
llmReasoningEffort === "high" ||
llmReasoningEffort === "xhigh"
? llmReasoningEffort
: llmEnableReasoner === false
? "none"
: null;
// Billing tier for conditional UI and error context (fetched once on mount) // Billing tier for conditional UI and error context (fetched once on mount)
const [billingTier, setBillingTier] = useState<string | null>(null); const [billingTier, setBillingTier] = useState<string | null>(null);
@@ -11177,6 +11191,7 @@ Plan file path: ${planFilePath}`;
agentName={agentName} agentName={agentName}
currentModel={currentModelDisplay} currentModel={currentModelDisplay}
currentModelProvider={currentModelProvider} currentModelProvider={currentModelProvider}
currentReasoningEffort={currentReasoningEffort}
messageQueue={messageQueue} messageQueue={messageQueue}
onEnterQueueEditMode={handleEnterQueueEditMode} onEnterQueueEditMode={handleEnterQueueEditMode}
onEscapeCancel={ onEscapeCancel={

View File

@@ -1,6 +1,7 @@
import { Box } from "ink"; import { Box } from "ink";
import Link from "ink-link"; import Link from "ink-link";
import { memo, useMemo } from "react"; import { memo, useMemo } from "react";
import type { ModelReasoningEffort } from "../../agent/model";
import { DEFAULT_AGENT_NAME } from "../../constants"; import { DEFAULT_AGENT_NAME } from "../../constants";
import { settingsManager } from "../../settings-manager"; import { settingsManager } from "../../settings-manager";
import { getVersion } from "../../version"; import { getVersion } from "../../version";
@@ -10,16 +11,32 @@ import { Text } from "./Text";
interface AgentInfoBarProps { interface AgentInfoBarProps {
agentId?: string; agentId?: string;
agentName?: string | null; agentName?: string | null;
currentModel?: string | null;
currentReasoningEffort?: ModelReasoningEffort | null;
serverUrl?: string; serverUrl?: string;
conversationId?: string; conversationId?: string;
} }
/**
 * Maps a reasoning-effort setting to the short label shown in the footer
 * (e.g. "none" renders as "no", "xhigh" as "max").
 * Returns null for absent or unrecognized values so callers can omit the tag.
 */
function formatReasoningLabel(
  effort: ModelReasoningEffort | null | undefined,
): string | null {
  if (effort == null) return null;
  // Table of display labels; efforts without a remapping show as themselves.
  const labels: Record<string, string> = {
    none: "no",
    xhigh: "max",
    minimal: "minimal",
    low: "low",
    medium: "medium",
    high: "high",
  };
  return labels[effort] ?? null;
}
/** /**
* Shows agent info bar with current agent details and useful links. * Shows agent info bar with current agent details and useful links.
*/ */
export const AgentInfoBar = memo(function AgentInfoBar({ export const AgentInfoBar = memo(function AgentInfoBar({
agentId, agentId,
agentName, agentName,
currentModel,
currentReasoningEffort,
serverUrl, serverUrl,
conversationId, conversationId,
}: AgentInfoBarProps) { }: AgentInfoBarProps) {
@@ -38,6 +55,10 @@ export const AgentInfoBar = memo(function AgentInfoBar({
? `https://app.letta.com/agents/${agentId}${conversationId && conversationId !== "default" ? `?conversation=${conversationId}` : ""}` ? `https://app.letta.com/agents/${agentId}${conversationId && conversationId !== "default" ? `?conversation=${conversationId}` : ""}`
: ""; : "";
const showBottomBar = agentId && agentId !== "loading"; const showBottomBar = agentId && agentId !== "loading";
const reasoningLabel = formatReasoningLabel(currentReasoningEffort);
const modelLine = currentModel
? `${currentModel}${reasoningLabel ? ` (${reasoningLabel})` : ""}`
: null;
if (!showBottomBar) { if (!showBottomBar) {
return null; return null;
@@ -101,9 +122,15 @@ export const AgentInfoBar = memo(function AgentInfoBar({
{!isCloudUser && <Text dimColor>{serverUrl}</Text>} {!isCloudUser && <Text dimColor>{serverUrl}</Text>}
</Box> </Box>
{/* Alien + Agent ID */} {/* Model summary */}
<Box> <Box>
<Text color={colors.footer.agentName}>{alienLines[2]}</Text> <Text color={colors.footer.agentName}>{alienLines[2]}</Text>
<Text dimColor>{modelLine ?? "model unknown"}</Text>
</Box>
{/* Agent ID */}
<Box>
<Text>{alienLines[3]}</Text>
<Text dimColor>{agentId}</Text> <Text dimColor>{agentId}</Text>
</Box> </Box>

View File

@@ -1,5 +1,6 @@
import { Box } from "ink"; import { Box } from "ink";
import { useEffect } from "react"; import { useEffect } from "react";
import type { ModelReasoningEffort } from "../../agent/model";
import { AgentInfoBar } from "./AgentInfoBar"; import { AgentInfoBar } from "./AgentInfoBar";
import { FileAutocomplete } from "./FileAutocomplete"; import { FileAutocomplete } from "./FileAutocomplete";
import { SlashCommandAutocomplete } from "./SlashCommandAutocomplete"; import { SlashCommandAutocomplete } from "./SlashCommandAutocomplete";
@@ -13,6 +14,8 @@ interface InputAssistProps {
onAutocompleteActiveChange: (isActive: boolean) => void; onAutocompleteActiveChange: (isActive: boolean) => void;
agentId?: string; agentId?: string;
agentName?: string | null; agentName?: string | null;
currentModel?: string | null;
currentReasoningEffort?: ModelReasoningEffort | null;
serverUrl?: string; serverUrl?: string;
workingDirectory?: string; workingDirectory?: string;
conversationId?: string; conversationId?: string;
@@ -33,6 +36,8 @@ export function InputAssist({
onAutocompleteActiveChange, onAutocompleteActiveChange,
agentId, agentId,
agentName, agentName,
currentModel,
currentReasoningEffort,
serverUrl, serverUrl,
workingDirectory, workingDirectory,
conversationId, conversationId,
@@ -80,6 +85,8 @@ export function InputAssist({
<AgentInfoBar <AgentInfoBar
agentId={agentId} agentId={agentId}
agentName={agentName} agentName={agentName}
currentModel={currentModel}
currentReasoningEffort={currentReasoningEffort}
serverUrl={serverUrl} serverUrl={serverUrl}
conversationId={conversationId} conversationId={conversationId}
/> />

View File

@@ -17,6 +17,7 @@ import {
useState, useState,
} from "react"; } from "react";
import stringWidth from "string-width"; import stringWidth from "string-width";
import type { ModelReasoningEffort } from "../../agent/model";
import { LETTA_CLOUD_API_URL } from "../../auth/oauth"; import { LETTA_CLOUD_API_URL } from "../../auth/oauth";
import { import {
ELAPSED_DISPLAY_THRESHOLD_MS, ELAPSED_DISPLAY_THRESHOLD_MS,
@@ -50,6 +51,18 @@ function truncateEnd(value: string, maxChars: number): string {
return `${value.slice(0, maxChars - 3)}...`; return `${value.slice(0, maxChars - 3)}...`;
} }
/**
 * Derives the short parenthetical tag appended to the model name in the
 * input footer (e.g. "gpt-5 (high)"). "none" and "xhigh" are remapped to
 * friendlier labels; any other / missing value yields null (no tag shown).
 */
function getReasoningEffortTag(
  effort: ModelReasoningEffort | null | undefined,
): string | null {
  switch (effort) {
    case "none":
      return "no";
    case "xhigh":
      return "max";
    case "minimal":
    case "low":
    case "medium":
    case "high":
      // These efforts display verbatim.
      return effort;
    default:
      return null;
  }
}
/** /**
* Represents a visual line segment in the text. * Represents a visual line segment in the text.
* A visual line ends at either a newline character or when it reaches lineWidth. * A visual line ends at either a newline character or when it reaches lineWidth.
@@ -203,6 +216,7 @@ const InputFooter = memo(function InputFooter({
showExitHint, showExitHint,
agentName, agentName,
currentModel, currentModel,
currentReasoningEffort,
isOpenAICodexProvider, isOpenAICodexProvider,
isByokProvider, isByokProvider,
hideFooter, hideFooter,
@@ -219,6 +233,7 @@ const InputFooter = memo(function InputFooter({
showExitHint: boolean; showExitHint: boolean;
agentName: string | null | undefined; agentName: string | null | undefined;
currentModel: string | null | undefined; currentModel: string | null | undefined;
currentReasoningEffort?: ModelReasoningEffort | null;
isOpenAICodexProvider: boolean; isOpenAICodexProvider: boolean;
isByokProvider: boolean; isByokProvider: boolean;
hideFooter: boolean; hideFooter: boolean;
@@ -230,10 +245,13 @@ const InputFooter = memo(function InputFooter({
const hideFooterContent = hideFooter; const hideFooterContent = hideFooter;
const maxAgentChars = Math.max(10, Math.floor(rightColumnWidth * 0.45)); const maxAgentChars = Math.max(10, Math.floor(rightColumnWidth * 0.45));
const displayAgentName = truncateEnd(agentName || "Unnamed", maxAgentChars); const displayAgentName = truncateEnd(agentName || "Unnamed", maxAgentChars);
const reasoningTag = getReasoningEffortTag(currentReasoningEffort);
const byokExtraChars = isByokProvider ? 2 : 0; // " ▲" const byokExtraChars = isByokProvider ? 2 : 0; // " ▲"
const reservedChars = displayAgentName.length + byokExtraChars + 4; const reservedChars = displayAgentName.length + byokExtraChars + 4;
const maxModelChars = Math.max(8, rightColumnWidth - reservedChars); const maxModelChars = Math.max(8, rightColumnWidth - reservedChars);
const displayModel = truncateEnd(currentModel ?? "unknown", maxModelChars); const modelWithReasoning =
(currentModel ?? "unknown") + (reasoningTag ? ` (${reasoningTag})` : "");
const displayModel = truncateEnd(modelWithReasoning, maxModelChars);
const rightTextLength = const rightTextLength =
displayAgentName.length + displayModel.length + byokExtraChars + 3; displayAgentName.length + displayModel.length + byokExtraChars + 3;
const rightPrefixSpaces = Math.max(0, rightColumnWidth - rightTextLength); const rightPrefixSpaces = Math.max(0, rightColumnWidth - rightTextLength);
@@ -522,6 +540,7 @@ export function Input({
agentName, agentName,
currentModel, currentModel,
currentModelProvider, currentModelProvider,
currentReasoningEffort,
messageQueue, messageQueue,
onEnterQueueEditMode, onEnterQueueEditMode,
onEscapeCancel, onEscapeCancel,
@@ -561,6 +580,7 @@ export function Input({
agentName?: string | null; agentName?: string | null;
currentModel?: string | null; currentModel?: string | null;
currentModelProvider?: string | null; currentModelProvider?: string | null;
currentReasoningEffort?: ModelReasoningEffort | null;
messageQueue?: QueuedMessage[]; messageQueue?: QueuedMessage[];
onEnterQueueEditMode?: () => void; onEnterQueueEditMode?: () => void;
onEscapeCancel?: () => void; onEscapeCancel?: () => void;
@@ -1293,6 +1313,8 @@ export function Input({
onAutocompleteActiveChange={setIsAutocompleteActive} onAutocompleteActiveChange={setIsAutocompleteActive}
agentId={agentId} agentId={agentId}
agentName={agentName} agentName={agentName}
currentModel={currentModel}
currentReasoningEffort={currentReasoningEffort}
serverUrl={serverUrl} serverUrl={serverUrl}
workingDirectory={process.cwd()} workingDirectory={process.cwd()}
conversationId={conversationId} conversationId={conversationId}
@@ -1307,6 +1329,7 @@ export function Input({
showExitHint={ralphActive || ralphPending} showExitHint={ralphActive || ralphPending}
agentName={agentName} agentName={agentName}
currentModel={currentModel} currentModel={currentModel}
currentReasoningEffort={currentReasoningEffort}
isOpenAICodexProvider={ isOpenAICodexProvider={
currentModelProvider === OPENAI_CODEX_PROVIDER_NAME currentModelProvider === OPENAI_CODEX_PROVIDER_NAME
} }
@@ -1354,6 +1377,7 @@ export function Input({
ralphActive, ralphActive,
ralphPending, ralphPending,
currentModel, currentModel,
currentReasoningEffort,
currentModelProvider, currentModelProvider,
hideFooter, hideFooter,
footerRightColumnWidth, footerRightColumnWidth,